initial architectural overhaul
Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
152
crates/libmarathon/src/db.rs
Normal file
152
crates/libmarathon/src/db.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
use rusqlite::{
|
||||
Connection,
|
||||
OpenFlags,
|
||||
Row,
|
||||
params,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
error::Result,
|
||||
models::*,
|
||||
};
|
||||
|
||||
/// Read-only handle to the chat SQLite database.
pub struct ChatDb {
    // Underlying rusqlite connection; opened with SQLITE_OPEN_READ_ONLY in `open`.
    conn: Connection,
}
|
||||
|
||||
impl ChatDb {
|
||||
/// Open a connection to the chat database in read-only mode
|
||||
pub fn open(path: &str) -> Result<Self> {
|
||||
let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_ONLY)?;
|
||||
Ok(Self { conn })
|
||||
}
|
||||
|
||||
/// Get messages from the conversation with +31 6 39 13 29 13
|
||||
///
|
||||
/// Returns messages from January 1, 2024 to present from the conversation
|
||||
/// with the specified Dutch phone number.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `start_date` - Start date (defaults to January 1, 2024 if None)
|
||||
/// * `end_date` - End date (defaults to current time if None)
|
||||
pub fn get_our_messages(
|
||||
&self,
|
||||
start_date: Option<chrono::DateTime<chrono::Utc>>,
|
||||
end_date: Option<chrono::DateTime<chrono::Utc>>,
|
||||
) -> Result<Vec<Message>> {
|
||||
use chrono::{
|
||||
TimeZone,
|
||||
Utc,
|
||||
};
|
||||
|
||||
// Default date range: January 1, 2024 to now
|
||||
let start =
|
||||
start_date.unwrap_or_else(|| Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap());
|
||||
let end = end_date.unwrap_or_else(|| Utc::now());
|
||||
|
||||
// Convert to Apple timestamps (nanoseconds since 2001-01-01)
|
||||
let start_timestamp = datetime_to_apple_timestamp(start);
|
||||
let end_timestamp = datetime_to_apple_timestamp(end);
|
||||
|
||||
// The phone number might be stored with or without spaces
|
||||
let phone_with_spaces = "+31 6 39 13 29 13";
|
||||
let phone_without_spaces = "+31639132913";
|
||||
|
||||
// Find the chat with this phone number (try both formats)
|
||||
let chat = self
|
||||
.get_chat_for_phone_number(phone_with_spaces)
|
||||
.or_else(|_| self.get_chat_for_phone_number(phone_without_spaces))?;
|
||||
|
||||
// Get messages from this chat within the date range
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT m.ROWID, m.guid, m.text, m.service, m.handle_id, m.date, m.date_read, m.date_delivered,
|
||||
m.is_from_me, m.is_read, m.is_delivered, m.is_sent, m.is_emote, m.is_audio_message,
|
||||
m.cache_has_attachments, m.associated_message_guid, m.associated_message_type,
|
||||
m.thread_originator_guid, m.reply_to_guid, m.is_spam
|
||||
FROM message m
|
||||
INNER JOIN chat_message_join cmj ON m.ROWID = cmj.message_id
|
||||
WHERE cmj.chat_id = ?
|
||||
AND m.date >= ?
|
||||
AND m.date <= ?
|
||||
ORDER BY m.date ASC"
|
||||
)?;
|
||||
|
||||
let messages = stmt
|
||||
.query_map(
|
||||
params![chat.rowid, start_timestamp, end_timestamp],
|
||||
map_message_row,
|
||||
)?
|
||||
.collect::<std::result::Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
/// Helper function to find the largest chat with a specific phone number
|
||||
fn get_chat_for_phone_number(&self, phone_number: &str) -> Result<Chat> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT c.ROWID, c.guid, c.chat_identifier, c.service_name, c.display_name,
|
||||
c.group_id, c.room_name, c.is_archived, c.is_filtered,
|
||||
c.last_read_message_timestamp, COUNT(cmj.message_id) as msg_count
|
||||
FROM chat c
|
||||
INNER JOIN chat_handle_join chj ON c.ROWID = chj.chat_id
|
||||
INNER JOIN handle h ON chj.handle_id = h.ROWID
|
||||
INNER JOIN chat_message_join cmj ON c.ROWID = cmj.chat_id
|
||||
WHERE h.id = ?
|
||||
GROUP BY c.ROWID
|
||||
ORDER BY msg_count DESC
|
||||
LIMIT 1",
|
||||
)?;
|
||||
|
||||
let chat = stmt.query_row(params![phone_number], |row| {
|
||||
Ok(Chat {
|
||||
rowid: row.get(0)?,
|
||||
guid: row.get(1)?,
|
||||
chat_identifier: row.get(2)?,
|
||||
service_name: row.get(3)?,
|
||||
display_name: row.get(4)?,
|
||||
group_id: row.get(5)?,
|
||||
room_name: row.get(6)?,
|
||||
is_archived: row.get::<_, i64>(7)? != 0,
|
||||
is_filtered: row.get::<_, i64>(8)? != 0,
|
||||
last_read_message_timestamp: row
|
||||
.get::<_, Option<i64>>(9)?
|
||||
.map(apple_timestamp_to_datetime),
|
||||
})
|
||||
})?;
|
||||
|
||||
Ok(chat)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to map database rows to structs
|
||||
fn map_message_row(row: &Row) -> rusqlite::Result<Message> {
|
||||
Ok(Message {
|
||||
rowid: row.get(0)?,
|
||||
guid: row.get(1)?,
|
||||
text: row.get(2)?,
|
||||
service: row.get(3)?,
|
||||
handle_id: row.get(4)?,
|
||||
date: row
|
||||
.get::<_, Option<i64>>(5)?
|
||||
.map(apple_timestamp_to_datetime),
|
||||
date_read: row
|
||||
.get::<_, Option<i64>>(6)?
|
||||
.map(apple_timestamp_to_datetime),
|
||||
date_delivered: row
|
||||
.get::<_, Option<i64>>(7)?
|
||||
.map(apple_timestamp_to_datetime),
|
||||
is_from_me: row.get::<_, i64>(8)? != 0,
|
||||
is_read: row.get::<_, i64>(9)? != 0,
|
||||
is_delivered: row.get::<_, i64>(10)? != 0,
|
||||
is_sent: row.get::<_, i64>(11)? != 0,
|
||||
is_emote: row.get::<_, i64>(12)? != 0,
|
||||
is_audio_message: row.get::<_, i64>(13)? != 0,
|
||||
cache_has_attachments: row.get::<_, i64>(14)? != 0,
|
||||
associated_message_guid: row.get(15)?,
|
||||
associated_message_type: row.get(16)?,
|
||||
thread_originator_guid: row.get(17)?,
|
||||
reply_to_guid: row.get(18)?,
|
||||
is_spam: row.get::<_, i64>(19)? != 0,
|
||||
})
|
||||
}
|
||||
72
crates/libmarathon/src/engine/bridge.rs
Normal file
72
crates/libmarathon/src/engine/bridge.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
//! Bridge between Bevy and Core Engine
|
||||
//!
|
||||
//! TODO(Phase 3): Create a Bevy-specific system (in app crate) that polls
|
||||
//! `EngineBridge::poll_events()` every tick and dispatches EngineEvents to Bevy
|
||||
//! (spawn entities, update transforms, update locks, emit Bevy messages, etc.)
|
||||
//!
|
||||
//! NOTE: The bridge is ECS-agnostic. Later we can create adapters for other engines
|
||||
//! like Flecs once we're closer to release.
|
||||
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{mpsc, Mutex};
|
||||
use bevy::prelude::Resource;
|
||||
|
||||
use super::{EngineCommand, EngineEvent};
|
||||
|
||||
/// Shared bridge between Bevy and Core Engine
///
/// Cloneable Bevy `Resource`: commands flow out through `command_tx`,
/// engine events are drained via `poll_events`.
#[derive(Clone, Resource)]
pub struct EngineBridge {
    // Sender half for Bevy -> Engine commands.
    command_tx: mpsc::UnboundedSender<EngineCommand>,
    // Receiver half for Engine -> Bevy events; Arc<Mutex<..>> so all clones
    // share the single receiver.
    event_rx: Arc<Mutex<mpsc::UnboundedReceiver<EngineEvent>>>,
}
|
||||
|
||||
/// Engine-side handle for receiving commands and sending events
pub struct EngineHandle {
    /// Receives commands sent by `EngineBridge::send_command`.
    pub(crate) command_rx: mpsc::UnboundedReceiver<EngineCommand>,
    /// Sends events that `EngineBridge::poll_events` drains.
    pub(crate) event_tx: mpsc::UnboundedSender<EngineEvent>,
}
|
||||
|
||||
impl EngineBridge {
|
||||
/// Create a new bridge and return both the Bevy-side bridge and Engine-side handle
|
||||
pub fn new() -> (Self, EngineHandle) {
|
||||
let (command_tx, command_rx) = mpsc::unbounded_channel();
|
||||
let (event_tx, event_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let bridge = Self {
|
||||
command_tx,
|
||||
event_rx: Arc::new(Mutex::new(event_rx)),
|
||||
};
|
||||
|
||||
let handle = EngineHandle {
|
||||
command_rx,
|
||||
event_tx,
|
||||
};
|
||||
|
||||
(bridge, handle)
|
||||
}
|
||||
|
||||
/// Send command from Bevy to Engine
|
||||
pub fn send_command(&self, cmd: EngineCommand) {
|
||||
// Ignore send errors (engine might be shut down)
|
||||
let _ = self.command_tx.send(cmd);
|
||||
}
|
||||
|
||||
/// Poll events from Engine to Bevy (non-blocking)
|
||||
/// Returns all available events in the queue
|
||||
pub fn poll_events(&self) -> Vec<EngineEvent> {
|
||||
let mut events = Vec::new();
|
||||
// Try to lock without blocking (returns immediately if locked)
|
||||
if let Ok(mut rx) = self.event_rx.try_lock() {
|
||||
while let Ok(event) = rx.try_recv() {
|
||||
events.push(event);
|
||||
}
|
||||
}
|
||||
events
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for EngineBridge {
|
||||
fn default() -> Self {
|
||||
Self::new().0
|
||||
}
|
||||
}
|
||||
50
crates/libmarathon/src/engine/commands.rs
Normal file
50
crates/libmarathon/src/engine/commands.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
//! Commands sent from Bevy to the Core Engine
|
||||
|
||||
use crate::networking::SessionId;
|
||||
use bevy::prelude::*;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Commands that Bevy sends to the Core Engine
#[derive(Debug, Clone)]
pub enum EngineCommand {
    // Networking lifecycle
    /// Start networking for the given session.
    StartNetworking { session_id: SessionId },
    /// Tear down the networking task.
    StopNetworking,
    /// Join a session (EngineCore restarts networking with the new session).
    JoinSession { session_id: SessionId },
    /// Leave the current session (handled the same as StopNetworking).
    LeaveSession,

    // CRDT operations — currently unhandled by EngineCore (Phase 2 TODO).
    SpawnEntity {
        entity_id: Uuid,
        position: Vec3,
        rotation: Quat,
    },
    UpdateTransform {
        entity_id: Uuid,
        position: Vec3,
        rotation: Quat,
    },
    DeleteEntity {
        entity_id: Uuid,
    },

    // Lock operations — currently unhandled by EngineCore (Phase 2 TODO).
    AcquireLock {
        entity_id: Uuid,
    },
    ReleaseLock {
        entity_id: Uuid,
    },
    BroadcastHeartbeat {
        entity_id: Uuid,
    },

    // Persistence — both are logged stubs in EngineCore for now.
    SaveSession,
    LoadSession {
        session_id: SessionId,
    },

    // Clock
    /// Advance this node's vector clock; EngineCore replies with ClockTicked.
    TickClock,
}
|
||||
140
crates/libmarathon/src/engine/core.rs
Normal file
140
crates/libmarathon/src/engine/core.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
//! Core Engine event loop - runs on tokio outside Bevy
|
||||
|
||||
use tokio::task::JoinHandle;
|
||||
use uuid::Uuid;
|
||||
|
||||
use super::{EngineCommand, EngineEvent, EngineHandle, NetworkingManager, PersistenceManager};
|
||||
use crate::networking::{SessionId, VectorClock};
|
||||
|
||||
/// Core engine state: command/event channels, the optional networking task,
/// persistence, and this node's vector-clock identity.
pub struct EngineCore {
    /// Engine side of the Bevy<->Engine channel pair.
    handle: EngineHandle,
    /// Background networking task; `None` while offline.
    networking_task: Option<JoinHandle<()>>,
    #[allow(dead_code)] // kept for the SaveSession/LoadSession stubs
    persistence: PersistenceManager,

    // Clock state
    /// Random id identifying this peer in the vector clock.
    node_id: Uuid,
    clock: VectorClock,
}
|
||||
|
||||
impl EngineCore {
|
||||
pub fn new(handle: EngineHandle, db_path: &str) -> Self {
|
||||
let persistence = PersistenceManager::new(db_path);
|
||||
let node_id = Uuid::new_v4();
|
||||
let clock = VectorClock::new();
|
||||
|
||||
tracing::info!("EngineCore node ID: {}", node_id);
|
||||
|
||||
Self {
|
||||
handle,
|
||||
networking_task: None, // Start offline
|
||||
persistence,
|
||||
node_id,
|
||||
clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the engine event loop (runs on tokio)
|
||||
/// Processes commands unbounded - tokio handles internal polling
|
||||
pub async fn run(mut self) {
|
||||
tracing::info!("EngineCore starting (unbounded)...");
|
||||
|
||||
// Process commands as they arrive
|
||||
while let Some(cmd) = self.handle.command_rx.recv().await {
|
||||
self.handle_command(cmd).await;
|
||||
}
|
||||
|
||||
tracing::info!("EngineCore shutting down (command channel closed)");
|
||||
}
|
||||
|
||||
async fn handle_command(&mut self, cmd: EngineCommand) {
|
||||
match cmd {
|
||||
EngineCommand::StartNetworking { session_id } => {
|
||||
self.start_networking(session_id).await;
|
||||
}
|
||||
EngineCommand::StopNetworking => {
|
||||
self.stop_networking().await;
|
||||
}
|
||||
EngineCommand::JoinSession { session_id } => {
|
||||
self.join_session(session_id).await;
|
||||
}
|
||||
EngineCommand::LeaveSession => {
|
||||
self.stop_networking().await;
|
||||
}
|
||||
EngineCommand::SaveSession => {
|
||||
// TODO: Save current session state
|
||||
tracing::debug!("SaveSession command received (stub)");
|
||||
}
|
||||
EngineCommand::LoadSession { session_id } => {
|
||||
tracing::debug!("LoadSession command received for {} (stub)", session_id.to_code());
|
||||
}
|
||||
EngineCommand::TickClock => {
|
||||
self.tick_clock();
|
||||
}
|
||||
// TODO: Handle CRDT and lock commands in Phase 2
|
||||
_ => {
|
||||
tracing::debug!("Unhandled command: {:?}", cmd);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn tick_clock(&mut self) {
|
||||
let seq = self.clock.increment(self.node_id);
|
||||
let _ = self.handle.event_tx.send(EngineEvent::ClockTicked {
|
||||
sequence: seq,
|
||||
clock: self.clock.clone(),
|
||||
});
|
||||
tracing::debug!("Clock ticked to {}", seq);
|
||||
}
|
||||
|
||||
async fn start_networking(&mut self, session_id: SessionId) {
|
||||
if self.networking_task.is_some() {
|
||||
tracing::warn!("Networking already started");
|
||||
return;
|
||||
}
|
||||
|
||||
match NetworkingManager::new(session_id.clone()).await {
|
||||
Ok(net_manager) => {
|
||||
let node_id = net_manager.node_id();
|
||||
|
||||
// Spawn NetworkingManager in background task
|
||||
let event_tx = self.handle.event_tx.clone();
|
||||
let task = tokio::spawn(async move {
|
||||
net_manager.run(event_tx).await;
|
||||
});
|
||||
|
||||
self.networking_task = Some(task);
|
||||
|
||||
let _ = self.handle.event_tx.send(EngineEvent::NetworkingStarted {
|
||||
session_id: session_id.clone(),
|
||||
node_id,
|
||||
});
|
||||
tracing::info!("Networking started for session {}", session_id.to_code());
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = self.handle.event_tx.send(EngineEvent::NetworkingFailed {
|
||||
error: e.to_string(),
|
||||
});
|
||||
tracing::error!("Failed to start networking: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn stop_networking(&mut self) {
|
||||
if let Some(task) = self.networking_task.take() {
|
||||
task.abort(); // Cancel the networking task
|
||||
let _ = self.handle.event_tx.send(EngineEvent::NetworkingStopped);
|
||||
tracing::info!("Networking stopped");
|
||||
}
|
||||
}
|
||||
|
||||
async fn join_session(&mut self, session_id: SessionId) {
|
||||
// Stop existing networking if any
|
||||
if self.networking_task.is_some() {
|
||||
self.stop_networking().await;
|
||||
}
|
||||
|
||||
// Start networking with new session
|
||||
self.start_networking(session_id).await;
|
||||
}
|
||||
}
|
||||
71
crates/libmarathon/src/engine/events.rs
Normal file
71
crates/libmarathon/src/engine/events.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
//! Events emitted from the Core Engine to Bevy
|
||||
|
||||
use crate::networking::{NodeId, SessionId, VectorClock};
|
||||
use bevy::prelude::*;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Events that the Core Engine emits to Bevy
///
/// Drained on the Bevy side via `EngineBridge::poll_events`.
#[derive(Debug, Clone)]
pub enum EngineEvent {
    // Networking status
    /// Networking task spawned successfully for `session_id`.
    NetworkingStarted {
        session_id: SessionId,
        node_id: NodeId,
    },
    /// NetworkingManager construction failed; `error` is the stringified cause.
    NetworkingFailed {
        error: String,
    },
    /// Networking task was aborted.
    NetworkingStopped,
    SessionJoined {
        session_id: SessionId,
    },
    SessionLeft,

    // Peer events
    PeerJoined {
        node_id: NodeId,
    },
    PeerLeft {
        node_id: NodeId,
    },

    // CRDT sync events — each carries the vector-clock version of the change.
    EntitySpawned {
        entity_id: Uuid,
        position: Vec3,
        rotation: Quat,
        version: VectorClock,
    },
    EntityUpdated {
        entity_id: Uuid,
        position: Vec3,
        rotation: Quat,
        version: VectorClock,
    },
    EntityDeleted {
        entity_id: Uuid,
        version: VectorClock,
    },

    // Lock events
    LockAcquired {
        entity_id: Uuid,
        holder: NodeId,
    },
    LockReleased {
        entity_id: Uuid,
    },
    LockDenied {
        entity_id: Uuid,
        current_holder: NodeId,
    },
    LockExpired {
        entity_id: Uuid,
    },

    // Clock events
    /// Emitted by EngineCore::tick_clock after incrementing this node's entry.
    ClockTicked {
        sequence: u64,
        clock: VectorClock,
    },
}
|
||||
118
crates/libmarathon/src/engine/game_actions.rs
Normal file
118
crates/libmarathon/src/engine/game_actions.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
//! Semantic game actions
|
||||
//!
|
||||
//! Actions represent what the player wants to do, independent of how they
|
||||
//! triggered it. This enables input remapping and accessibility.
|
||||
|
||||
use glam::Vec2;
|
||||
|
||||
/// High-level game actions that result from input processing
///
/// Produced by `InputController::process_event`; a single raw input event may
/// map to several actions (e.g. a press yields SelectEntity + BeginDrag).
#[derive(Debug, Clone, PartialEq)]
pub enum GameAction {
    /// Move an entity in 2D (XY plane)
    MoveEntity {
        /// Movement delta (in screen/world space)
        delta: Vec2,
    },

    /// Rotate an entity
    RotateEntity {
        /// Rotation delta (yaw, pitch)
        delta: Vec2,
    },

    /// Move entity along Z axis (depth)
    MoveEntityDepth {
        /// Depth delta
        delta: f32,
    },

    /// Select/deselect an entity at a position
    SelectEntity {
        /// Screen position
        position: Vec2,
    },

    /// Begin dragging at a position
    BeginDrag {
        /// Screen position
        position: Vec2,
    },

    /// Continue dragging
    ContinueDrag {
        /// Current screen position
        position: Vec2,
        /// Delta since last drag event
        delta: Vec2,
    },

    /// End dragging
    EndDrag {
        /// Final screen position
        position: Vec2,
    },

    /// Reset entity to default state
    ResetEntity,

    /// Delete selected entity
    DeleteEntity,

    /// Spawn new entity at position
    SpawnEntity {
        /// Screen position
        position: Vec2,
    },

    /// Camera movement
    MoveCamera {
        /// Movement delta
        delta: Vec2,
    },

    /// Camera zoom
    ZoomCamera {
        /// Zoom delta
        delta: f32,
    },

    /// Toggle UI panel
    ToggleUI,

    /// Confirm action (Enter, Space, etc.)
    Confirm,

    /// Cancel action (Escape, etc.)
    Cancel,

    /// Undo last action
    Undo,

    /// Redo last undone action
    Redo,
}
|
||||
|
||||
impl GameAction {
|
||||
/// Get a human-readable description of this action
|
||||
pub fn description(&self) -> &'static str {
|
||||
match self {
|
||||
GameAction::MoveEntity { .. } => "Move entity in XY plane",
|
||||
GameAction::RotateEntity { .. } => "Rotate entity",
|
||||
GameAction::MoveEntityDepth { .. } => "Move entity along Z axis",
|
||||
GameAction::SelectEntity { .. } => "Select/deselect entity",
|
||||
GameAction::BeginDrag { .. } => "Begin dragging",
|
||||
GameAction::ContinueDrag { .. } => "Continue dragging",
|
||||
GameAction::EndDrag { .. } => "End dragging",
|
||||
GameAction::ResetEntity => "Reset entity to default",
|
||||
GameAction::DeleteEntity => "Delete selected entity",
|
||||
GameAction::SpawnEntity { .. } => "Spawn new entity",
|
||||
GameAction::MoveCamera { .. } => "Move camera",
|
||||
GameAction::ZoomCamera { .. } => "Zoom camera",
|
||||
GameAction::ToggleUI => "Toggle UI panel",
|
||||
GameAction::Confirm => "Confirm",
|
||||
GameAction::Cancel => "Cancel",
|
||||
GameAction::Undo => "Undo",
|
||||
GameAction::Redo => "Redo",
|
||||
}
|
||||
}
|
||||
}
|
||||
337
crates/libmarathon/src/engine/input_controller.rs
Normal file
337
crates/libmarathon/src/engine/input_controller.rs
Normal file
@@ -0,0 +1,337 @@
|
||||
//! Input controller - maps raw InputEvents to semantic GameActions
|
||||
//!
|
||||
//! This layer provides:
|
||||
//! - Input remapping (change key bindings)
|
||||
//! - Accessibility (alternative input methods)
|
||||
//! - Context-aware bindings (different actions in different modes)
|
||||
|
||||
use super::game_actions::GameAction;
|
||||
use super::input_events::{InputEvent, KeyCode, MouseButton, TouchPhase};
|
||||
use glam::Vec2;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Input binding - maps an input trigger to a game action
///
/// NOTE(review): the bindings table is not consulted yet — default bindings
/// are hardcoded in `InputController::process_event` (see
/// `setup_default_bindings`). This enum is the planned data-driven key.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum InputBinding {
    /// Mouse button press/release
    MouseButton(MouseButton),

    /// Mouse drag with a specific button
    MouseDrag(MouseButton),

    /// Mouse wheel scroll
    MouseWheel,

    /// Keyboard key press
    Key(KeyCode),

    /// Keyboard key with modifiers
    KeyWithModifiers {
        key: KeyCode,
        shift: bool,
        ctrl: bool,
        alt: bool,
        meta: bool,
    },

    /// Stylus input (Apple Pencil, etc.)
    StylusDrag,

    /// Touch input
    TouchDrag,
}
|
||||
|
||||
/// Input context - different binding sets for different game modes
///
/// Currently only EntityManipulation and CameraControl change drag handling
/// in `InputController::process_mouse`; UI and TextInput drags are ignored.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum InputContext {
    /// Manipulating 3D entities
    EntityManipulation,

    /// Camera control
    CameraControl,

    /// UI interaction
    UI,

    /// Text input
    TextInput,
}
|
||||
|
||||
/// Accessibility settings for input processing
#[derive(Debug, Clone)]
pub struct AccessibilitySettings {
    /// Mouse sensitivity multiplier (1.0 = normal)
    pub mouse_sensitivity: f32,

    /// Scroll sensitivity multiplier (1.0 = normal)
    pub scroll_sensitivity: f32,

    /// Stylus pressure sensitivity (1.0 = normal)
    pub stylus_sensitivity: f32,

    /// Enable one-handed mode (use keyboard for rotation)
    pub one_handed_mode: bool,

    /// Invert Y axis for rotation
    pub invert_y: bool,

    /// Minimum drag distance before registering as drag (in pixels)
    ///
    /// NOTE(review): only enforced for mouse drags in `process_mouse`;
    /// stylus/touch drags currently bypass the threshold.
    pub drag_threshold: f32,
}
|
||||
|
||||
impl Default for AccessibilitySettings {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
mouse_sensitivity: 1.0,
|
||||
scroll_sensitivity: 1.0,
|
||||
stylus_sensitivity: 1.0,
|
||||
one_handed_mode: false,
|
||||
invert_y: false,
|
||||
drag_threshold: 2.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Input controller - converts InputEvents to GameActions
pub struct InputController {
    /// Current input context
    current_context: InputContext,

    /// Bindings for each context
    // NOTE(review): populated nowhere yet — setup_default_bindings is a stub
    // and process_event uses hardcoded mappings.
    bindings: HashMap<InputContext, HashMap<InputBinding, GameAction>>,

    /// Accessibility settings
    accessibility: AccessibilitySettings,

    /// Drag state tracking
    drag_state: DragState,
}
|
||||
|
||||
/// Mutable state for an in-progress drag gesture.
#[derive(Default)]
struct DragState {
    /// Is currently dragging
    active: bool,

    /// Which button/input is dragging
    source: Option<DragSource>,

    /// Start position
    start_pos: Vec2,

    /// Last position
    last_pos: Vec2,
}
|
||||
|
||||
/// Which input device initiated the current drag.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DragSource {
    MouseLeft,
    MouseRight,
    Stylus,
    // NOTE(review): never constructed in this file — process_touch delegates
    // to process_stylus, which records Stylus. Confirm whether Touch is needed.
    Touch,
}
|
||||
|
||||
impl InputController {
|
||||
/// Create a new input controller with default bindings
|
||||
pub fn new() -> Self {
|
||||
let mut controller = Self {
|
||||
current_context: InputContext::EntityManipulation,
|
||||
bindings: HashMap::new(),
|
||||
accessibility: AccessibilitySettings::default(),
|
||||
drag_state: DragState::default(),
|
||||
};
|
||||
|
||||
controller.setup_default_bindings();
|
||||
controller
|
||||
}
|
||||
|
||||
/// Set the current input context
|
||||
pub fn set_context(&mut self, context: InputContext) {
|
||||
self.current_context = context;
|
||||
}
|
||||
|
||||
/// Get the current context
|
||||
pub fn context(&self) -> InputContext {
|
||||
self.current_context
|
||||
}
|
||||
|
||||
/// Update accessibility settings
|
||||
pub fn set_accessibility(&mut self, settings: AccessibilitySettings) {
|
||||
self.accessibility = settings;
|
||||
}
|
||||
|
||||
/// Get current accessibility settings
|
||||
pub fn accessibility(&self) -> &AccessibilitySettings {
|
||||
&self.accessibility
|
||||
}
|
||||
|
||||
/// Process an input event and produce game actions
|
||||
pub fn process_event(&mut self, event: &InputEvent) -> Vec<GameAction> {
|
||||
let mut actions = Vec::new();
|
||||
|
||||
match event {
|
||||
InputEvent::Mouse { pos, button, phase } => {
|
||||
self.process_mouse(*pos, *button, *phase, &mut actions);
|
||||
}
|
||||
|
||||
InputEvent::MouseWheel { delta, pos: _ } => {
|
||||
let adjusted_delta = delta.y * self.accessibility.scroll_sensitivity;
|
||||
actions.push(GameAction::MoveEntityDepth { delta: adjusted_delta });
|
||||
}
|
||||
|
||||
InputEvent::Keyboard { key, pressed, modifiers: _ } => {
|
||||
if *pressed {
|
||||
self.process_key(*key, &mut actions);
|
||||
}
|
||||
}
|
||||
|
||||
InputEvent::Stylus { pos, pressure: _, tilt: _, phase, timestamp: _ } => {
|
||||
self.process_stylus(*pos, *phase, &mut actions);
|
||||
}
|
||||
|
||||
InputEvent::Touch { pos, phase, id: _ } => {
|
||||
self.process_touch(*pos, *phase, &mut actions);
|
||||
}
|
||||
}
|
||||
|
||||
actions
|
||||
}
|
||||
|
||||
/// Process mouse input
|
||||
fn process_mouse(&mut self, pos: Vec2, button: MouseButton, phase: TouchPhase, actions: &mut Vec<GameAction>) {
|
||||
match phase {
|
||||
TouchPhase::Started => {
|
||||
// Single click = select
|
||||
actions.push(GameAction::SelectEntity { position: pos });
|
||||
|
||||
// Start drag tracking
|
||||
self.drag_state.active = true;
|
||||
self.drag_state.source = Some(match button {
|
||||
MouseButton::Left => DragSource::MouseLeft,
|
||||
MouseButton::Right => DragSource::MouseRight,
|
||||
MouseButton::Middle => return, // Don't handle middle button
|
||||
});
|
||||
self.drag_state.start_pos = pos;
|
||||
self.drag_state.last_pos = pos;
|
||||
|
||||
actions.push(GameAction::BeginDrag { position: pos });
|
||||
}
|
||||
|
||||
TouchPhase::Moved => {
|
||||
if self.drag_state.active {
|
||||
let delta = (pos - self.drag_state.last_pos) * self.accessibility.mouse_sensitivity;
|
||||
self.drag_state.last_pos = pos;
|
||||
|
||||
// Check if we've exceeded drag threshold
|
||||
let total_delta = pos - self.drag_state.start_pos;
|
||||
if total_delta.length() < self.accessibility.drag_threshold {
|
||||
return; // Too small to count as drag
|
||||
}
|
||||
|
||||
actions.push(GameAction::ContinueDrag { position: pos, delta });
|
||||
|
||||
// Context-specific drag actions
|
||||
match self.current_context {
|
||||
InputContext::EntityManipulation => {
|
||||
match self.drag_state.source {
|
||||
Some(DragSource::MouseLeft) => {
|
||||
actions.push(GameAction::MoveEntity { delta });
|
||||
}
|
||||
Some(DragSource::MouseRight) => {
|
||||
let adjusted_delta = if self.accessibility.invert_y {
|
||||
Vec2::new(delta.x, -delta.y)
|
||||
} else {
|
||||
delta
|
||||
};
|
||||
actions.push(GameAction::RotateEntity { delta: adjusted_delta });
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
InputContext::CameraControl => {
|
||||
actions.push(GameAction::MoveCamera { delta });
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TouchPhase::Ended | TouchPhase::Cancelled => {
|
||||
if self.drag_state.active {
|
||||
actions.push(GameAction::EndDrag { position: pos });
|
||||
self.drag_state.active = false;
|
||||
self.drag_state.source = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process keyboard input
|
||||
fn process_key(&mut self, key: KeyCode, actions: &mut Vec<GameAction>) {
|
||||
match key {
|
||||
KeyCode::KeyR => actions.push(GameAction::ResetEntity),
|
||||
KeyCode::Delete | KeyCode::Backspace => actions.push(GameAction::DeleteEntity),
|
||||
KeyCode::KeyZ if self.accessibility.one_handed_mode => {
|
||||
// In one-handed mode, Z key can trigger actions
|
||||
actions.push(GameAction::Undo);
|
||||
}
|
||||
KeyCode::Escape => actions.push(GameAction::Cancel),
|
||||
KeyCode::Enter => actions.push(GameAction::Confirm),
|
||||
KeyCode::Tab => actions.push(GameAction::ToggleUI),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process stylus input (Apple Pencil, etc.)
|
||||
fn process_stylus(&mut self, pos: Vec2, phase: TouchPhase, actions: &mut Vec<GameAction>) {
|
||||
match phase {
|
||||
TouchPhase::Started => {
|
||||
actions.push(GameAction::SelectEntity { position: pos });
|
||||
actions.push(GameAction::BeginDrag { position: pos });
|
||||
self.drag_state.active = true;
|
||||
self.drag_state.source = Some(DragSource::Stylus);
|
||||
self.drag_state.start_pos = pos;
|
||||
self.drag_state.last_pos = pos;
|
||||
}
|
||||
|
||||
TouchPhase::Moved => {
|
||||
if self.drag_state.active {
|
||||
let delta = (pos - self.drag_state.last_pos) * self.accessibility.stylus_sensitivity;
|
||||
self.drag_state.last_pos = pos;
|
||||
|
||||
actions.push(GameAction::ContinueDrag { position: pos, delta });
|
||||
actions.push(GameAction::MoveEntity { delta });
|
||||
}
|
||||
}
|
||||
|
||||
TouchPhase::Ended | TouchPhase::Cancelled => {
|
||||
if self.drag_state.active {
|
||||
actions.push(GameAction::EndDrag { position: pos });
|
||||
self.drag_state.active = false;
|
||||
self.drag_state.source = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process touch input
|
||||
fn process_touch(&mut self, pos: Vec2, phase: TouchPhase, actions: &mut Vec<GameAction>) {
|
||||
// For now, treat touch like stylus
|
||||
self.process_stylus(pos, phase, actions);
|
||||
}
|
||||
|
||||
/// Set up default input bindings
|
||||
fn setup_default_bindings(&mut self) {
|
||||
// For now, bindings are hardcoded in process_event
|
||||
// Later, we can make this fully data-driven
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for InputController {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "input_controller_tests.rs"]
|
||||
mod tests;
|
||||
326
crates/libmarathon/src/engine/input_controller_tests.rs
Normal file
326
crates/libmarathon/src/engine/input_controller_tests.rs
Normal file
@@ -0,0 +1,326 @@
|
||||
//! Unit tests for InputController
|
||||
|
||||
use super::{AccessibilitySettings, InputContext, InputController};
|
||||
use crate::engine::game_actions::GameAction;
|
||||
use crate::engine::input_events::{InputEvent, KeyCode, MouseButton, TouchPhase};
|
||||
use glam::Vec2;
|
||||
|
||||
#[test]
fn test_mouse_left_drag_produces_move_entity() {
    // Left press should select + begin a drag; a subsequent move should emit
    // MoveEntity with the exact pixel delta (mouse_sensitivity defaults to 1.0).
    let mut controller = InputController::new();

    // Mouse down at (100, 100)
    let actions = controller.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Started,
    });

    // Should select entity and begin drag
    assert!(actions.iter().any(|a| matches!(a, GameAction::SelectEntity { .. })));
    assert!(actions.iter().any(|a| matches!(a, GameAction::BeginDrag { .. })));

    // Mouse drag to (150, 120)
    let actions = controller.process_event(&InputEvent::Mouse {
        pos: Vec2::new(150.0, 120.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });

    // Should produce MoveEntity with delta
    let move_action = actions.iter().find_map(|a| {
        if let GameAction::MoveEntity { delta } = a {
            Some(delta)
        } else {
            None
        }
    });

    assert!(move_action.is_some());
    let delta = move_action.unwrap();
    // (150,120) - (100,100) = (50, 20)
    assert_eq!(*delta, Vec2::new(50.0, 20.0));
}
|
||||
|
||||
#[test]
fn test_mouse_right_drag_produces_rotate_entity() {
    // Right-button drags should rotate rather than translate the entity.
    let mut controller = InputController::new();

    // Right mouse down
    controller.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Right,
        phase: TouchPhase::Started,
    });

    // Right mouse drag
    let actions = controller.process_event(&InputEvent::Mouse {
        pos: Vec2::new(120.0, 130.0),
        button: MouseButton::Right,
        phase: TouchPhase::Moved,
    });

    // Should produce RotateEntity
    assert!(actions.iter().any(|a| matches!(a, GameAction::RotateEntity { .. })));
}
|
||||
|
||||
#[test]
fn test_mouse_wheel_produces_depth_movement() {
    // Wheel scroll maps to Z-axis movement; with the default scroll
    // sensitivity of 1.0 the delta passes through unchanged.
    let mut controller = InputController::new();

    let actions = controller.process_event(&InputEvent::MouseWheel {
        delta: Vec2::new(0.0, 10.0),
        pos: Vec2::new(100.0, 100.0),
    });

    // Should produce MoveEntityDepth
    let depth_action = actions.iter().find_map(|a| {
        if let GameAction::MoveEntityDepth { delta } = a {
            Some(*delta)
        } else {
            None
        }
    });

    assert_eq!(depth_action, Some(10.0));
}
|
||||
|
||||
/// Pressing the R key emits a ResetEntity action.
#[test]
fn test_keyboard_r_resets_entity() {
    let mut input = InputController::new();

    let actions = input.process_event(&InputEvent::Keyboard {
        key: KeyCode::KeyR,
        pressed: true,
        modifiers: Default::default(),
    });

    let reset_emitted = actions.iter().any(|action| *action == GameAction::ResetEntity);
    assert!(reset_emitted);
}
|
||||
|
||||
/// Pressing Delete emits a DeleteEntity action.
#[test]
fn test_keyboard_delete_removes_entity() {
    let mut input = InputController::new();

    let actions = input.process_event(&InputEvent::Keyboard {
        key: KeyCode::Delete,
        pressed: true,
        modifiers: Default::default(),
    });

    let delete_emitted = actions.iter().any(|action| *action == GameAction::DeleteEntity);
    assert!(delete_emitted);
}
|
||||
|
||||
/// Movements below the configured drag threshold are suppressed; once the
/// cumulative motion exceeds it, MoveEntity actions flow again.
#[test]
fn test_drag_threshold_prevents_tiny_movements() {
    let mut input = InputController::new();
    input.set_accessibility(AccessibilitySettings {
        drag_threshold: 10.0,
        ..Default::default()
    });

    // Begin a left-button drag.
    input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Started,
    });

    // A 2-pixel move is below the 10-pixel threshold: no MoveEntity.
    let below = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(102.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });
    let moved_early = below
        .iter()
        .any(|action| matches!(action, GameAction::MoveEntity { .. }));
    assert!(!moved_early);

    // A 15-pixel total move exceeds the threshold: MoveEntity expected.
    let above = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(115.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });
    let moved_late = above
        .iter()
        .any(|action| matches!(action, GameAction::MoveEntity { .. }));
    assert!(moved_late);
}
|
||||
|
||||
/// Mouse sensitivity scales drag deltas before they reach the game.
#[test]
fn test_mouse_sensitivity_multiplier() {
    let mut input = InputController::new();
    input.set_accessibility(AccessibilitySettings {
        mouse_sensitivity: 2.0,
        ..Default::default()
    });

    // Begin a left-button drag.
    input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Started,
    });

    // Move 10 pixels to the right.
    let actions = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(110.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });

    // With sensitivity 2.0 the reported delta is doubled: 10 * 2.0 = 20.
    let delta = actions.iter().find_map(|action| match action {
        GameAction::MoveEntity { delta } => Some(*delta),
        _ => None,
    });
    assert_eq!(delta, Some(Vec2::new(20.0, 0.0)));
}
|
||||
|
||||
/// With invert_y enabled, a downward drag produces a negative rotation on Y.
#[test]
fn test_invert_y_axis() {
    let mut input = InputController::new();
    input.set_accessibility(AccessibilitySettings {
        invert_y: true,
        ..Default::default()
    });

    // Begin a right-button (rotation) drag.
    input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Right,
        phase: TouchPhase::Started,
    });

    // Drag downward: screen-space +Y.
    let actions = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 110.0),
        button: MouseButton::Right,
        phase: TouchPhase::Moved,
    });

    // The rotation's Y component must come out negative (inverted).
    let delta = actions.iter().find_map(|action| match action {
        GameAction::RotateEntity { delta } => Some(*delta),
        _ => None,
    });
    let delta = delta.expect("right-button drag should emit RotateEntity");
    assert!(delta.y < 0.0);
}
|
||||
|
||||
/// A complete drag emits BeginDrag, ContinueDrag, and EndDrag in order.
#[test]
fn test_drag_sequence_produces_begin_continue_end() {
    let mut input = InputController::new();

    // Phase: Started -> BeginDrag.
    let press = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Started,
    });
    assert!(press.iter().any(|a| matches!(a, GameAction::BeginDrag { .. })));

    // Phase: Moved -> ContinueDrag.
    let drag = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(150.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });
    assert!(drag.iter().any(|a| matches!(a, GameAction::ContinueDrag { .. })));

    // Phase: Ended -> EndDrag.
    let release = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(150.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Ended,
    });
    assert!(release.iter().any(|a| matches!(a, GameAction::EndDrag { .. })));
}
|
||||
|
||||
/// Stylus drags translate entities just like left-button mouse drags.
#[test]
fn test_stylus_produces_move_entity() {
    let mut input = InputController::new();

    // Pen touches down.
    input.process_event(&InputEvent::Stylus {
        pos: Vec2::new(100.0, 100.0),
        pressure: 0.5,
        tilt: Vec2::ZERO,
        phase: TouchPhase::Started,
        timestamp: 0.0,
    });

    // Pen drags with increased pressure one frame later.
    let actions = input.process_event(&InputEvent::Stylus {
        pos: Vec2::new(150.0, 120.0),
        pressure: 0.8,
        tilt: Vec2::ZERO,
        phase: TouchPhase::Moved,
        timestamp: 0.016,
    });

    let moved = actions
        .iter()
        .any(|action| matches!(action, GameAction::MoveEntity { .. }));
    assert!(moved);
}
|
||||
|
||||
/// Switching contexts redirects drags from entities to the camera.
#[test]
fn test_context_switching() {
    let mut input = InputController::new();

    // The controller boots in entity-manipulation mode.
    assert_eq!(input.context(), InputContext::EntityManipulation);

    // Flip to camera control.
    input.set_context(InputContext::CameraControl);
    assert_eq!(input.context(), InputContext::CameraControl);

    // Begin a drag.
    input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(100.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Started,
    });

    // Drag while in CameraControl context.
    let actions = input.process_event(&InputEvent::Mouse {
        pos: Vec2::new(150.0, 100.0),
        button: MouseButton::Left,
        phase: TouchPhase::Moved,
    });

    // In camera mode a drag moves the camera, never an entity.
    let camera_moved = actions
        .iter()
        .any(|a| matches!(a, GameAction::MoveCamera { .. }));
    let entity_moved = actions
        .iter()
        .any(|a| matches!(a, GameAction::MoveEntity { .. }));
    assert!(camera_moved);
    assert!(!entity_moved);
}
|
||||
|
||||
/// Scroll sensitivity multiplies wheel deltas before depth movement.
#[test]
fn test_scroll_sensitivity() {
    let mut input = InputController::new();
    input.set_accessibility(AccessibilitySettings {
        scroll_sensitivity: 3.0,
        ..Default::default()
    });

    let actions = input.process_event(&InputEvent::MouseWheel {
        delta: Vec2::new(0.0, 5.0),
        pos: Vec2::ZERO,
    });

    // 5.0 scroll units * 3.0 sensitivity = 15.0 depth delta.
    let depth = actions.iter().find_map(|action| match action {
        GameAction::MoveEntityDepth { delta } => Some(*delta),
        _ => None,
    });
    assert_eq!(depth, Some(15.0));
}
|
||||
133
crates/libmarathon/src/engine/input_events.rs
Normal file
133
crates/libmarathon/src/engine/input_events.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
//! Abstract input event types for the engine
|
||||
//!
|
||||
//! These types are platform-agnostic and represent all forms of input
|
||||
//! (stylus, mouse, touch) in a unified way. Platform-specific code
|
||||
//! (iOS pencil bridge, desktop mouse) converts to these types.
|
||||
|
||||
use glam::Vec2;
|
||||
|
||||
/// Phase of a touch/stylus/mouse input
///
/// Models the lifecycle of a pointer gesture: `Started`, then zero or more
/// `Moved`, then `Ended` (or `Cancelled` if the platform interrupts it).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TouchPhase {
    /// Input just started
    Started,
    /// Input moved
    Moved,
    /// Input ended normally
    Ended,
    /// Input was cancelled (e.g., system gesture)
    Cancelled,
}
|
||||
|
||||
/// Mouse button types
///
/// Only the three standard buttons are modeled; extra buttons
/// (back/forward, etc.) have no variant here.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MouseButton {
    /// Primary button.
    Left,
    /// Secondary button.
    Right,
    /// Wheel click.
    Middle,
}
|
||||
|
||||
/// Keyboard key (using winit's KeyCode for now - can abstract later)
|
||||
pub use winit::keyboard::KeyCode;
|
||||
|
||||
/// Keyboard modifiers
///
/// A plain set of flags for the four standard modifier keys;
/// `Default` yields all `false` (no modifiers held).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Modifiers {
    /// Shift key held.
    pub shift: bool,
    /// Control key held.
    pub ctrl: bool,
    /// Alt/Option key held.
    pub alt: bool,
    pub meta: bool, // Command on macOS, Windows key on Windows
}
|
||||
|
||||
/// Abstract input event that the engine processes
///
/// Platform-specific code converts native input (UITouch, winit events)
/// into these engine-agnostic events. All variants are `Copy`, so events
/// can be passed and stored by value freely.
#[derive(Debug, Clone, Copy)]
pub enum InputEvent {
    /// Stylus input (Apple Pencil, Surface Pen, etc.)
    Stylus {
        /// Screen position in pixels
        pos: Vec2,
        /// Pressure (0.0 = no pressure, 1.0+ = max pressure)
        /// Note: Apple Pencil reports 0.0-4.0 range
        pressure: f32,
        /// Tilt vector:
        /// - x: altitude angle (0 = flat on screen, π/2 = perpendicular)
        /// - y: azimuth angle (rotation around vertical axis)
        tilt: Vec2,
        /// Touch phase
        phase: TouchPhase,
        /// Platform timestamp (for input prediction)
        timestamp: f64,
    },

    /// Mouse input (desktop)
    Mouse {
        /// Screen position in pixels
        pos: Vec2,
        /// Which button
        button: MouseButton,
        /// Touch phase
        phase: TouchPhase,
    },

    /// Touch input (fingers on touchscreen)
    Touch {
        /// Screen position in pixels
        pos: Vec2,
        /// Touch phase
        phase: TouchPhase,
        /// Touch ID (for multi-touch tracking)
        id: u64,
    },

    /// Keyboard input
    Keyboard {
        /// Physical key code
        key: KeyCode,
        /// Whether the key was pressed or released
        pressed: bool,
        /// Modifier keys held during the event
        modifiers: Modifiers,
    },

    /// Mouse wheel scroll
    MouseWheel {
        /// Scroll delta (pixels or lines depending on device)
        delta: Vec2,
        /// Current mouse position
        pos: Vec2,
    },
}
|
||||
|
||||
impl InputEvent {
|
||||
/// Get the position for positional input types
|
||||
pub fn position(&self) -> Option<Vec2> {
|
||||
match self {
|
||||
InputEvent::Stylus { pos, .. } => Some(*pos),
|
||||
InputEvent::Mouse { pos, .. } => Some(*pos),
|
||||
InputEvent::Touch { pos, .. } => Some(*pos),
|
||||
InputEvent::MouseWheel { pos, .. } => Some(*pos),
|
||||
InputEvent::Keyboard { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the phase for input types that have phases
|
||||
pub fn phase(&self) -> Option<TouchPhase> {
|
||||
match self {
|
||||
InputEvent::Stylus { phase, .. } => Some(*phase),
|
||||
InputEvent::Mouse { phase, .. } => Some(*phase),
|
||||
InputEvent::Touch { phase, .. } => Some(*phase),
|
||||
InputEvent::Keyboard { .. } | InputEvent::MouseWheel { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this is an active input (not ended/cancelled)
|
||||
pub fn is_active(&self) -> bool {
|
||||
match self.phase() {
|
||||
Some(phase) => !matches!(phase, TouchPhase::Ended | TouchPhase::Cancelled),
|
||||
None => true, // Keyboard and wheel events are considered instantaneous
|
||||
}
|
||||
}
|
||||
}
|
||||
21
crates/libmarathon/src/engine/mod.rs
Normal file
21
crates/libmarathon/src/engine/mod.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
//! Core Engine module - networking and persistence outside Bevy
|
||||
|
||||
mod bridge;
|
||||
mod commands;
|
||||
mod core;
|
||||
mod events;
|
||||
mod game_actions;
|
||||
mod input_controller;
|
||||
mod input_events;
|
||||
mod networking;
|
||||
mod persistence;
|
||||
|
||||
pub use bridge::{EngineBridge, EngineHandle};
|
||||
pub use commands::EngineCommand;
|
||||
pub use core::EngineCore;
|
||||
pub use events::EngineEvent;
|
||||
pub use game_actions::GameAction;
|
||||
pub use input_controller::{AccessibilitySettings, InputContext, InputController};
|
||||
pub use input_events::{InputEvent, KeyCode, Modifiers, MouseButton, TouchPhase};
|
||||
pub use networking::NetworkingManager;
|
||||
pub use persistence::PersistenceManager;
|
||||
243
crates/libmarathon/src/engine/networking.rs
Normal file
243
crates/libmarathon/src/engine/networking.rs
Normal file
@@ -0,0 +1,243 @@
|
||||
//! Networking Manager - handles iroh networking and CRDT state outside Bevy
|
||||
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time;
|
||||
use bytes::Bytes;
|
||||
use futures_lite::StreamExt;
|
||||
|
||||
use crate::networking::{
|
||||
EntityLockRegistry, NodeId, OperationLog, SessionId, TombstoneRegistry, VectorClock,
|
||||
VersionedMessage, SyncMessage, LockMessage,
|
||||
};
|
||||
|
||||
use super::EngineEvent;
|
||||
|
||||
/// Owns the iroh gossip connection and CRDT bookkeeping for one session.
///
/// Built by [`NetworkingManager::new`]; [`NetworkingManager::run`] then
/// consumes the manager and drives message handling plus periodic lock
/// maintenance.
pub struct NetworkingManager {
    // Session this manager participates in.
    session_id: SessionId,
    // Our node identity, derived from the iroh endpoint key (first 16 bytes).
    node_id: NodeId,

    // Iroh networking
    sender: iroh_gossip::api::GossipSender,
    receiver: iroh_gossip::api::GossipReceiver,

    // Keep these alive for the lifetime of the manager
    _endpoint: iroh::Endpoint,
    _router: iroh::protocol::Router,
    _gossip: iroh_gossip::net::Gossip,

    // CRDT state
    // NOTE(review): vector_clock and operation_log are initialized but not
    // yet used by any method visible here — presumably wired up in a later
    // phase; confirm.
    vector_clock: VectorClock,
    operation_log: OperationLog,
    tombstones: TombstoneRegistry,
    locks: EntityLockRegistry,

    // Track locks we own for heartbeat broadcasting
    our_locks: std::collections::HashSet<uuid::Uuid>,
}
|
||||
|
||||
impl NetworkingManager {
    /// Bind an iroh endpoint with mDNS discovery, join the session's
    /// gossip topic, and return a manager with fresh CRDT state.
    ///
    /// # Errors
    /// Fails if the endpoint cannot bind or the gossip subscription fails.
    pub async fn new(session_id: SessionId) -> anyhow::Result<Self> {
        use iroh::{
            discovery::mdns::MdnsDiscovery,
            protocol::Router,
            Endpoint,
        };
        use iroh_gossip::{
            net::Gossip,
            proto::TopicId,
        };

        // Create iroh endpoint with mDNS discovery
        let endpoint = Endpoint::builder()
            .discovery(MdnsDiscovery::builder())
            .bind()
            .await?;

        let endpoint_id = endpoint.addr().id;

        // Convert endpoint ID to NodeId (using first 16 bytes)
        // NOTE(review): this truncates the endpoint key to 16 bytes —
        // presumably the collision risk is acceptable for session-local
        // peers; confirm.
        let id_bytes = endpoint_id.as_bytes();
        let mut node_id_bytes = [0u8; 16];
        node_id_bytes.copy_from_slice(&id_bytes[..16]);
        let node_id = NodeId::from_bytes(node_id_bytes);

        // Create gossip protocol
        let gossip = Gossip::builder().spawn(endpoint.clone());

        // Derive session-specific ALPN for network isolation
        let session_alpn = session_id.to_alpn();

        // Set up router to accept session ALPN
        let router = Router::builder(endpoint.clone())
            .accept(session_alpn.as_slice(), gossip.clone())
            .spawn();

        // Subscribe to topic derived from session ALPN; no bootstrap peers
        // (discovery fills the swarm in).
        let topic_id = TopicId::from_bytes(session_alpn);
        let subscribe_handle = gossip.subscribe(topic_id, vec![]).await?;

        let (sender, receiver) = subscribe_handle.split();

        tracing::info!(
            "NetworkingManager started for session {} with node {}",
            session_id.to_code(),
            node_id
        );

        let manager = Self {
            session_id,
            node_id,
            sender,
            receiver,
            _endpoint: endpoint,
            _router: router,
            _gossip: gossip,
            vector_clock: VectorClock::new(),
            operation_log: OperationLog::new(),
            tombstones: TombstoneRegistry::new(),
            locks: EntityLockRegistry::new(),
            our_locks: std::collections::HashSet::new(),
        };

        Ok(manager)
    }

    /// This node's identity (copied; NodeId is a 16-byte value).
    pub fn node_id(&self) -> NodeId {
        self.node_id
    }

    /// The session this manager belongs to.
    pub fn session_id(&self) -> SessionId {
        self.session_id.clone()
    }

    /// Process gossip events (unbounded) and periodic tasks (heartbeats, lock cleanup)
    ///
    /// Consumes the manager and loops forever; shut down by dropping the
    /// task that runs this future.
    pub async fn run(mut self, event_tx: mpsc::UnboundedSender<EngineEvent>) {
        // 1 Hz cadence for heartbeats and expiry sweeps.
        let mut heartbeat_interval = time::interval(Duration::from_secs(1));

        loop {
            tokio::select! {
                // Process gossip events unbounded (as fast as they arrive)
                Some(result) = self.receiver.next() => {
                    match result {
                        Ok(event) => {
                            use iroh_gossip::api::Event;
                            if let Event::Received(msg) = event {
                                self.handle_sync_message(&msg.content, &event_tx).await;
                            }
                            // Note: Neighbor events are not exposed in the current API
                        }
                        Err(e) => {
                            tracing::warn!("Gossip receiver error: {}", e);
                        }
                    }
                }

                // Periodic tasks: heartbeats and lock cleanup
                _ = heartbeat_interval.tick() => {
                    self.broadcast_lock_heartbeats(&event_tx).await;
                    self.cleanup_expired_locks(&event_tx);
                }
            }
        }
    }

    /// Decode an incoming gossip payload and dispatch it.
    ///
    /// Malformed payloads are logged and dropped (never fatal).
    async fn handle_sync_message(&mut self, msg_bytes: &[u8], event_tx: &mpsc::UnboundedSender<EngineEvent>) {
        // Deserialize SyncMessage
        let versioned: VersionedMessage = match bincode::deserialize(msg_bytes) {
            Ok(v) => v,
            Err(e) => {
                tracing::warn!("Failed to deserialize sync message: {}", e);
                return;
            }
        };

        match versioned.message {
            SyncMessage::Lock(lock_msg) => {
                self.handle_lock_message(lock_msg, event_tx);
            }
            _ => {
                // TODO: Handle other message types (ComponentOp, EntitySpawn, etc.)
                tracing::debug!("Unhandled sync message type");
            }
        }
    }

    /// Apply a lock protocol message to the local lock registry and
    /// surface the outcome to the engine via `event_tx`.
    fn handle_lock_message(&mut self, msg: LockMessage, event_tx: &mpsc::UnboundedSender<EngineEvent>) {
        match msg {
            LockMessage::LockRequest { entity_id, node_id } => {
                match self.locks.try_acquire(entity_id, node_id) {
                    Ok(()) => {
                        // Track if this is our lock
                        if node_id == self.node_id {
                            self.our_locks.insert(entity_id);
                        }

                        // Send failures mean the engine side is gone;
                        // nothing useful to do, so the result is ignored.
                        let _ = event_tx.send(EngineEvent::LockAcquired {
                            entity_id,
                            holder: node_id,
                        });
                    }
                    Err(current_holder) => {
                        let _ = event_tx.send(EngineEvent::LockDenied {
                            entity_id,
                            current_holder,
                        });
                    }
                }
            }
            LockMessage::LockHeartbeat { entity_id, holder } => {
                self.locks.renew_heartbeat(entity_id, holder);
            }
            LockMessage::LockRelease { entity_id, node_id } => {
                self.locks.release(entity_id, node_id);

                // Remove from our locks tracking
                if node_id == self.node_id {
                    self.our_locks.remove(&entity_id);
                }

                let _ = event_tx.send(EngineEvent::LockReleased { entity_id });
            }
            _ => {}
        }
    }

    /// Renew and broadcast heartbeats for every lock this node holds.
    ///
    /// NOTE(review): bincode serialization failures are silently dropped
    /// here — the lock would then expire on peers; consider logging.
    async fn broadcast_lock_heartbeats(&mut self, _event_tx: &mpsc::UnboundedSender<EngineEvent>) {
        // Broadcast heartbeats for locks we hold
        for entity_id in self.our_locks.iter().copied() {
            self.locks.renew_heartbeat(entity_id, self.node_id);

            let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockHeartbeat {
                entity_id,
                holder: self.node_id,
            }));

            if let Ok(bytes) = bincode::serialize(&msg) {
                let _ = self.sender.broadcast(Bytes::from(bytes)).await;
            }
        }
    }

    /// Force-release locks whose heartbeats have lapsed and notify the
    /// engine; our own locks are skipped (we heartbeat them ourselves).
    fn cleanup_expired_locks(&mut self, event_tx: &mpsc::UnboundedSender<EngineEvent>) {
        // Get expired locks from registry
        let expired = self.locks.get_expired_locks();

        for entity_id in expired {
            // Only cleanup if it's not our lock
            // NOTE(review): get_holder takes our node_id as a second
            // argument — signature defined elsewhere; confirm its
            // semantics before changing this.
            if let Some(holder) = self.locks.get_holder(entity_id, self.node_id) {
                if holder != self.node_id {
                    self.locks.force_release(entity_id);
                    let _ = event_tx.send(EngineEvent::LockExpired { entity_id });
                    tracing::info!("Lock expired for entity {}", entity_id);
                }
            }
        }
    }

    /// Consume the manager, dropping the endpoint/router/gossip handles.
    pub async fn shutdown(self) {
        tracing::info!("NetworkingManager shut down");
        // endpoint and gossip will be dropped automatically
    }
}
|
||||
79
crates/libmarathon/src/engine/persistence.rs
Normal file
79
crates/libmarathon/src/engine/persistence.rs
Normal file
@@ -0,0 +1,79 @@
|
||||
//! Persistence Manager - handles SQLite storage outside Bevy
|
||||
|
||||
use rusqlite::{Connection, OptionalExtension};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use crate::networking::{Session, SessionId};
|
||||
|
||||
/// SQLite-backed persistence for sessions.
pub struct PersistenceManager {
    // Single connection behind Arc<Mutex> so it can be used from multiple
    // threads; all queries serialize on the lock.
    conn: Arc<Mutex<Connection>>,
}
|
||||
|
||||
impl PersistenceManager {
|
||||
pub fn new(db_path: &str) -> Self {
|
||||
let conn = Connection::open(db_path).expect("Failed to open database");
|
||||
|
||||
// Initialize schema (Phase 1 stub - will load from file in Phase 4)
|
||||
let schema = "
|
||||
CREATE TABLE IF NOT EXISTS sessions (
|
||||
id TEXT PRIMARY KEY,
|
||||
state TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
last_active_at INTEGER NOT NULL
|
||||
);
|
||||
";
|
||||
|
||||
if let Err(e) = conn.execute_batch(schema) {
|
||||
tracing::warn!("Failed to initialize schema: {}", e);
|
||||
}
|
||||
|
||||
Self {
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_session(&self, session: &Session) -> anyhow::Result<()> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO sessions (id, state, created_at, last_active_at)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
(
|
||||
session.id.to_code(),
|
||||
format!("{:?}", session.state),
|
||||
session.created_at,
|
||||
session.last_active,
|
||||
),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_last_active_session(&self) -> anyhow::Result<Option<Session>> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
// Query for the most recently active session
|
||||
let mut stmt = conn.prepare(
|
||||
"SELECT id, state, created_at, last_active_at
|
||||
FROM sessions
|
||||
ORDER BY last_active_at DESC
|
||||
LIMIT 1"
|
||||
)?;
|
||||
|
||||
let session = stmt.query_row([], |row| {
|
||||
let id_code: String = row.get(0)?;
|
||||
let _state: String = row.get(1)?;
|
||||
let _created_at: String = row.get(2)?;
|
||||
let _last_active_at: String = row.get(3)?;
|
||||
|
||||
// Parse session ID from code
|
||||
if let Ok(session_id) = SessionId::from_code(&id_code) {
|
||||
Ok(Some(Session::new(session_id)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}).optional()?;
|
||||
|
||||
Ok(session.flatten())
|
||||
}
|
||||
}
|
||||
15
crates/libmarathon/src/error.rs
Normal file
15
crates/libmarathon/src/error.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced by the chat-database layer.
#[derive(Error, Debug)]
pub enum ChatDbError {
    /// Any underlying rusqlite failure (open, prepare, query, ...).
    #[error("Database error: {0}")]
    Database(#[from] rusqlite::Error),

    /// A requested record does not exist.
    #[error("Not found: {0}")]
    NotFound(String),

    /// A row was read but its contents could not be interpreted.
    #[error("Invalid data: {0}")]
    InvalidData(String),
}

/// Crate-wide result alias defaulting the error to [`ChatDbError`].
pub type Result<T> = std::result::Result<T, ChatDbError>;
|
||||
41
crates/libmarathon/src/lib.rs
Normal file
41
crates/libmarathon/src/lib.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
//! Data access layer for iMessage chat.db
|
||||
//!
|
||||
//! This library provides a read-only interface to query messages from a
|
||||
//! specific conversation.
|
||||
//!
|
||||
//! # Safety
|
||||
//!
|
||||
//! All database connections are opened in read-only mode to prevent any
|
||||
//! accidental modifications to your iMessage database.
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use libmarathon::ChatDb;
|
||||
//!
|
||||
//! let db = ChatDb::open("chat.db")?;
|
||||
//!
|
||||
//! // Get all messages from January 2024 to now
|
||||
//! let messages = db.get_our_messages(None, None)?;
|
||||
//! println!("Found {} messages", messages.len());
|
||||
//! # Ok::<(), libmarathon::ChatDbError>(())
|
||||
//! ```
|
||||
|
||||
mod db;
|
||||
mod error;
|
||||
mod models;
|
||||
pub mod engine;
|
||||
pub mod networking;
|
||||
pub mod persistence;
|
||||
pub mod platform;
|
||||
pub mod sync;
|
||||
|
||||
pub use db::ChatDb;
|
||||
pub use error::{
|
||||
ChatDbError,
|
||||
Result,
|
||||
};
|
||||
pub use models::{
|
||||
Chat,
|
||||
Message,
|
||||
};
|
||||
126
crates/libmarathon/src/models.rs
Normal file
126
crates/libmarathon/src/models.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
use chrono::{
|
||||
DateTime,
|
||||
Utc,
|
||||
};
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
/// Seconds between Unix epoch (1970-01-01) and Apple epoch (2001-01-01)
/// Apple's Cocoa timestamps use 2001-01-01 00:00:00 UTC as their reference
/// point
const APPLE_EPOCH_OFFSET: i64 = 978307200;

/// Represents a message in the iMessage database
///
/// NOTE(review): field names appear to mirror columns of chat.db's
/// `message` table — confirm against the query that populates this struct.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// SQLite rowid of the message row.
    pub rowid: i64,
    /// Globally unique message identifier.
    pub guid: String,
    /// Message body; `None` for attachment-only or reaction rows.
    pub text: Option<String>,
    /// Delivery service (e.g. "iMessage" or "SMS") — presumably; verify.
    pub service: Option<String>,
    /// Foreign key to the sender/recipient handle.
    pub handle_id: i64,
    /// When the message was sent.
    pub date: Option<DateTime<Utc>>,
    /// When the message was read.
    pub date_read: Option<DateTime<Utc>>,
    /// When the message was delivered.
    pub date_delivered: Option<DateTime<Utc>>,
    /// True if this device sent the message.
    pub is_from_me: bool,
    pub is_read: bool,
    pub is_delivered: bool,
    pub is_sent: bool,
    pub is_emote: bool,
    pub is_audio_message: bool,
    pub cache_has_attachments: bool,
    /// GUID of the message this one reacts to / is associated with.
    pub associated_message_guid: Option<String>,
    pub associated_message_type: i64,
    /// GUID of the thread's first message, for threaded replies.
    pub thread_originator_guid: Option<String>,
    pub reply_to_guid: Option<String>,
    pub is_spam: bool,
}
|
||||
|
||||
/// Represents a chat/conversation
///
/// NOTE(review): field names appear to mirror columns of chat.db's `chat`
/// table — confirm against the query that populates this struct.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Chat {
    /// SQLite rowid of the chat row.
    pub rowid: i64,
    /// Globally unique chat identifier.
    pub guid: String,
    /// Phone number, email, or group id identifying the conversation.
    pub chat_identifier: Option<String>,
    pub service_name: Option<String>,
    /// User-visible name (set for named group chats).
    pub display_name: Option<String>,
    pub group_id: Option<String>,
    pub room_name: Option<String>,
    pub is_archived: bool,
    pub is_filtered: bool,
    /// Timestamp of the most recently read message in this chat.
    pub last_read_message_timestamp: Option<DateTime<Utc>>,
}
|
||||
|
||||
/// Helper function to convert Apple's Cocoa timestamp (seconds since
|
||||
/// 2001-01-01) to DateTime
|
||||
pub fn apple_timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> {
|
||||
// Apple's Cocoa timestamps are in nanoseconds since 2001-01-01 00:00:00 UTC
|
||||
// Convert to Unix timestamp (seconds since 1970-01-01 00:00:00 UTC)
|
||||
let seconds = timestamp / 1_000_000_000 + APPLE_EPOCH_OFFSET;
|
||||
let nanos = (timestamp % 1_000_000_000) as u32;
|
||||
|
||||
DateTime::from_timestamp(seconds, nanos)
|
||||
.unwrap_or_else(|| DateTime::from_timestamp(0, 0).unwrap())
|
||||
}
|
||||
|
||||
/// Helper function to convert DateTime to Apple's Cocoa timestamp
|
||||
pub fn datetime_to_apple_timestamp(dt: DateTime<Utc>) -> i64 {
|
||||
let unix_timestamp = dt.timestamp();
|
||||
let nanos = dt.timestamp_subsec_nanos() as i64;
|
||||
|
||||
(unix_timestamp - APPLE_EPOCH_OFFSET) * 1_000_000_000 + nanos
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use chrono::{
        Datelike,
        TimeZone,
        Timelike,
    };

    use super::*;

    /// Timestamp 0 is exactly the Apple epoch: 2001-01-01T00:00:00Z.
    #[test]
    fn test_apple_timestamp_to_datetime_zero() {
        let epoch = apple_timestamp_to_datetime(0);
        assert_eq!((epoch.year(), epoch.month(), epoch.day()), (2001, 1, 1));
        assert_eq!((epoch.hour(), epoch.minute(), epoch.second()), (0, 0, 0));
    }

    /// A known nanosecond count lands on 2023-01-01.
    #[test]
    fn test_apple_timestamp_to_datetime_known_value() {
        let dt = apple_timestamp_to_datetime(694_224_000_000_000_000);
        assert_eq!((dt.year(), dt.month(), dt.day()), (2023, 1, 1));
    }

    /// Converting to DateTime and back is lossless.
    #[test]
    fn test_apple_timestamp_roundtrip() {
        let original = 694_224_000_000_000_000_i64;
        let roundtripped = datetime_to_apple_timestamp(apple_timestamp_to_datetime(original));
        assert_eq!(roundtripped, original);
    }

    /// The Apple epoch itself maps back to timestamp 0.
    #[test]
    fn test_datetime_to_apple_timestamp_epoch() {
        let apple_epoch = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).unwrap();
        assert_eq!(datetime_to_apple_timestamp(apple_epoch), 0);
    }

    /// Instants before the Apple epoch (negative timestamps) are handled.
    #[test]
    fn test_negative_apple_timestamp() {
        let dt = apple_timestamp_to_datetime(-31_536_000_000_000_000);
        assert_eq!(dt.year(), 2000);
    }
}
|
||||
546
crates/libmarathon/src/networking/apply_ops.rs
Normal file
546
crates/libmarathon/src/networking/apply_ops.rs
Normal file
@@ -0,0 +1,546 @@
|
||||
//! Apply remote operations to local ECS state
|
||||
//!
|
||||
//! This module handles incoming EntityDelta messages and applies them to the
|
||||
//! local Bevy world using CRDT merge semantics.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bevy::prelude::*;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
networking::{
|
||||
VectorClock,
|
||||
blob_support::{
|
||||
BlobStore,
|
||||
get_component_data,
|
||||
},
|
||||
delta_generation::NodeVectorClock,
|
||||
entity_map::NetworkEntityMap,
|
||||
merge::compare_operations_lww,
|
||||
messages::{
|
||||
ComponentData,
|
||||
EntityDelta,
|
||||
SyncMessage,
|
||||
},
|
||||
operations::ComponentOp,
|
||||
},
|
||||
persistence::reflection::deserialize_component_typed,
|
||||
};
|
||||
|
||||
/// Resource to track the last vector clock and originating node for each
/// component on each entity
///
/// This enables Last-Write-Wins conflict resolution by comparing incoming
/// operations' vector clocks with the current component's vector clock.
/// The node_id is used as a deterministic tiebreaker for concurrent operations.
#[derive(Resource, Default)]
pub struct ComponentVectorClocks {
    /// Maps (entity_network_id, component_type) -> (vector_clock,
    /// originating_node_id)
    // NOTE(review): keys own a String per component type, so every `get`
    // allocates for the lookup — acceptable unless this lands on a hot path.
    clocks: HashMap<(Uuid, String), (VectorClock, Uuid)>,
}
|
||||
|
||||
impl ComponentVectorClocks {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
clocks: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current vector clock and node_id for a component
|
||||
pub fn get(&self, entity_id: Uuid, component_type: &str) -> Option<&(VectorClock, Uuid)> {
|
||||
self.clocks.get(&(entity_id, component_type.to_string()))
|
||||
}
|
||||
|
||||
/// Update the vector clock and node_id for a component
|
||||
pub fn set(
|
||||
&mut self,
|
||||
entity_id: Uuid,
|
||||
component_type: String,
|
||||
clock: VectorClock,
|
||||
node_id: Uuid,
|
||||
) {
|
||||
self.clocks
|
||||
.insert((entity_id, component_type), (clock, node_id));
|
||||
}
|
||||
|
||||
/// Remove all clocks for an entity (when entity is deleted)
|
||||
pub fn remove_entity(&mut self, entity_id: Uuid) {
|
||||
self.clocks.retain(|(eid, _), _| *eid != entity_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply an EntityDelta message to the local world
///
/// This function:
/// 1. Checks tombstone registry to prevent resurrection
/// 2. Looks up the entity by network_id
/// 3. Spawns a new entity if it doesn't exist
/// 4. Applies each ComponentOp using CRDT merge semantics
///
/// # Parameters
///
/// - `delta`: The EntityDelta to apply
/// - `world`: The Bevy world to apply changes to
pub fn apply_entity_delta(delta: &EntityDelta, world: &mut World) {
    // Validate and merge the remote vector clock. Scoped block so the
    // mutable borrow of NodeVectorClock ends before we touch other resources.
    {
        let mut node_clock = world.resource_mut::<NodeVectorClock>();

        // Check for clock regression (shouldn't happen in correct
        // implementations). We only warn — the delta is still processed.
        if delta.vector_clock.happened_before(&node_clock.clock) {
            warn!(
                "Received operation with clock from the past for entity {:?}. \
                 Remote clock happened before our clock. This may indicate clock issues.",
                delta.entity_id
            );
        }

        // Merge the remote vector clock into ours
        node_clock.clock.merge(&delta.vector_clock);
    }

    // Check if any operations are Delete operations. A Delete short-circuits
    // the whole delta: tombstone it, despawn locally, and return.
    for op in &delta.operations {
        if let crate::networking::ComponentOp::Delete { vector_clock } = op {
            // Record tombstone
            // NOTE(review): if the TombstoneRegistry resource is absent, the
            // Delete is neither recorded nor applied and the remaining ops are
            // still processed below — confirm that is the intended fallback.
            if let Some(mut registry) =
                world.get_resource_mut::<crate::networking::TombstoneRegistry>()
            {
                registry.record_deletion(delta.entity_id, delta.node_id, vector_clock.clone());

                // Despawn the entity if it exists locally. The lookup is done
                // in its own scope so the immutable resource borrow ends
                // before the mutable world access below.
                let entity_to_despawn = {
                    let entity_map = world.resource::<NetworkEntityMap>();
                    entity_map.get_entity(delta.entity_id)
                };
                if let Some(entity) = entity_to_despawn {
                    world.despawn(entity);
                    let mut entity_map = world.resource_mut::<NetworkEntityMap>();
                    entity_map.remove_by_network_id(delta.entity_id);
                    info!(
                        "Despawned entity {:?} due to Delete operation",
                        delta.entity_id
                    );
                }

                // Don't process other operations - entity is deleted
                return;
            }
        }
    }

    // Check if we should ignore this delta due to deletion (anti-resurrection:
    // a previously tombstoned entity must not be revived by late deltas).
    if let Some(registry) = world.get_resource::<crate::networking::TombstoneRegistry>() {
        if registry.should_ignore_operation(delta.entity_id, &delta.vector_clock) {
            debug!("Ignoring delta for deleted entity {:?}", delta.entity_id);
            return;
        }
    }

    // Resolve (or lazily create) the local entity for this network id.
    let entity = {
        let entity_map = world.resource::<NetworkEntityMap>();
        if let Some(entity) = entity_map.get_entity(delta.entity_id) {
            entity
        } else {
            // Use shared helper to spawn networked entity with persistence
            crate::networking::spawn_networked_entity(world, delta.entity_id, delta.node_id)
        }
    };

    // Apply each operation (skip Delete operations - handled above)
    for op in &delta.operations {
        if !op.is_delete() {
            apply_component_op(entity, op, delta.node_id, world);
        }
    }

    // Trigger persistence by marking Persisted as changed
    // This ensures remote entities are persisted after sync
    if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
        if let Some(mut persisted) = entity_mut.get_mut::<crate::persistence::Persisted>() {
            // Accessing &mut triggers Bevy's change detection
            let _ = &mut *persisted;
            debug!(
                "Triggered persistence for synced entity {:?}",
                delta.entity_id
            );
        }
    }
}
|
||||
|
||||
/// Apply a single ComponentOp to an entity
|
||||
///
|
||||
/// This dispatches to the appropriate CRDT merge logic based on the operation
|
||||
/// type.
|
||||
fn apply_component_op(entity: Entity, op: &ComponentOp, incoming_node_id: Uuid, world: &mut World) {
|
||||
match op {
|
||||
| ComponentOp::Set {
|
||||
component_type,
|
||||
data,
|
||||
vector_clock,
|
||||
} => {
|
||||
apply_set_operation_with_lww(
|
||||
entity,
|
||||
component_type,
|
||||
data,
|
||||
vector_clock,
|
||||
incoming_node_id,
|
||||
world,
|
||||
);
|
||||
},
|
||||
| ComponentOp::SetAdd { component_type, .. } => {
|
||||
// OR-Set add - Phase 10 provides OrSet<T> type
|
||||
// Application code should use OrSet in components and handle SetAdd/SetRemove
|
||||
// Full integration will be in Phase 12 plugin
|
||||
debug!(
|
||||
"SetAdd operation for {} (use OrSet<T> in components)",
|
||||
component_type
|
||||
);
|
||||
},
|
||||
| ComponentOp::SetRemove { component_type, .. } => {
|
||||
// OR-Set remove - Phase 10 provides OrSet<T> type
|
||||
// Application code should use OrSet in components and handle SetAdd/SetRemove
|
||||
// Full integration will be in Phase 12 plugin
|
||||
debug!(
|
||||
"SetRemove operation for {} (use OrSet<T> in components)",
|
||||
component_type
|
||||
);
|
||||
},
|
||||
| ComponentOp::SequenceInsert { .. } => {
|
||||
// RGA insert - will be implemented in Phase 11
|
||||
debug!("SequenceInsert operation not yet implemented");
|
||||
},
|
||||
| ComponentOp::SequenceDelete { .. } => {
|
||||
// RGA delete - will be implemented in Phase 11
|
||||
debug!("SequenceDelete operation not yet implemented");
|
||||
},
|
||||
| ComponentOp::Delete { .. } => {
|
||||
// Entity deletion - will be implemented in Phase 9
|
||||
debug!("Delete operation not yet implemented");
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply a Set operation with Last-Write-Wins conflict resolution
///
/// Compares the incoming vector clock with the stored clock for this component.
/// Only applies the operation if the incoming clock wins the LWW comparison.
/// Uses node_id as a deterministic tiebreaker for concurrent operations.
///
/// On a win, the component is written via `apply_set_operation` and the
/// stored clock in `ComponentVectorClocks` is updated to the incoming one.
fn apply_set_operation_with_lww(
    entity: Entity,
    component_type: &str,
    data: &ComponentData,
    incoming_clock: &VectorClock,
    incoming_node_id: Uuid,
    world: &mut World,
) {
    // Get the network ID for this entity; bail out (with a warning) if the
    // entity vanished or was never registered as networked.
    let entity_network_id = {
        if let Ok(entity_ref) = world.get_entity(entity) {
            if let Some(networked) = entity_ref.get::<crate::networking::NetworkedEntity>() {
                networked.network_id
            } else {
                warn!("Entity {:?} has no NetworkedEntity component", entity);
                return;
            }
        } else {
            warn!("Entity {:?} not found", entity);
            return;
        }
    };

    // Check if we should apply this operation based on LWW. Computed in a
    // scoped block so the immutable resource borrow ends before we mutate.
    let should_apply = {
        if let Some(component_clocks) = world.get_resource::<ComponentVectorClocks>() {
            if let Some((current_clock, current_node_id)) =
                component_clocks.get(entity_network_id, component_type)
            {
                // We have a current clock - do LWW comparison with real node IDs
                let decision = compare_operations_lww(
                    current_clock,
                    *current_node_id,
                    incoming_clock,
                    incoming_node_id,
                );

                match decision {
                    | crate::networking::merge::MergeDecision::ApplyRemote => {
                        debug!(
                            "Applying remote Set for {} (remote is newer)",
                            component_type
                        );
                        true
                    },
                    | crate::networking::merge::MergeDecision::KeepLocal => {
                        debug!(
                            "Ignoring remote Set for {} (local is newer)",
                            component_type
                        );
                        false
                    },
                    | crate::networking::merge::MergeDecision::Concurrent => {
                        // For concurrent operations, compare node_ids as a
                        // deterministic tiebreaker. Every node evaluates the
                        // same comparison, so all nodes converge on the same
                        // winner for concurrent updates.
                        if incoming_node_id > *current_node_id {
                            debug!(
                                "Applying remote Set for {} (concurrent, remote node_id {:?} > local {:?})",
                                component_type, incoming_node_id, current_node_id
                            );
                            true
                        } else {
                            debug!(
                                "Ignoring remote Set for {} (concurrent, local node_id {:?} >= remote {:?})",
                                component_type, current_node_id, incoming_node_id
                            );
                            false
                        }
                    },
                    | crate::networking::merge::MergeDecision::Equal => {
                        debug!("Ignoring remote Set for {} (clocks equal)", component_type);
                        false
                    },
                }
            } else {
                // No current clock - this is the first time we're setting this component
                debug!(
                    "Applying remote Set for {} (no current clock)",
                    component_type
                );
                true
            }
        } else {
            // No ComponentVectorClocks resource - apply unconditionally
            warn!("ComponentVectorClocks resource not found - applying Set without LWW check");
            true
        }
    };

    if !should_apply {
        return;
    }

    // Apply the operation
    apply_set_operation(entity, component_type, data, world);

    // Update the stored vector clock with node_id so future LWW comparisons
    // see this write. Skipped silently if the resource is missing (matching
    // the unconditional-apply branch above).
    if let Some(mut component_clocks) = world.get_resource_mut::<ComponentVectorClocks>() {
        component_clocks.set(
            entity_network_id,
            component_type.to_string(),
            incoming_clock.clone(),
            incoming_node_id,
        );
        debug!(
            "Updated vector clock for {} on entity {:?} (node_id: {:?})",
            component_type, entity_network_id, incoming_node_id
        );
    }
}
|
||||
|
||||
/// Apply a Set operation (Last-Write-Wins)
///
/// Deserializes the component and inserts/updates it on the entity.
/// Handles both inline data and blob references.
///
/// Failures (missing blob, deserialization error, unregistered type, missing
/// entity) are logged and the operation is dropped; nothing is partially
/// applied.
fn apply_set_operation(
    entity: Entity,
    component_type: &str,
    data: &ComponentData,
    world: &mut World,
) {
    // Read-guard over the type registry. The guard borrows the resource
    // immutably, so it must be dropped (see `drop` below) before any mutable
    // world access.
    let type_registry = {
        let registry_resource = world.resource::<AppTypeRegistry>();
        registry_resource.read()
    };
    let blob_store = world.get_resource::<BlobStore>();
    // Get the actual data (resolve blob if needed)
    let data_bytes = match data {
        | ComponentData::Inline(bytes) => bytes.clone(),
        | ComponentData::BlobRef { hash: _, size: _ } => {
            if let Some(store) = blob_store {
                match get_component_data(data, store) {
                    | Ok(bytes) => bytes,
                    | Err(e) => {
                        error!(
                            "Failed to retrieve blob for component {}: {}",
                            component_type, e
                        );
                        return;
                    },
                }
            } else {
                error!(
                    "Blob reference for {} but no blob store available",
                    component_type
                );
                return;
            }
        },
    };

    // Reflection-based deserialization into a boxed `dyn Reflect` value.
    let reflected = match deserialize_component_typed(&data_bytes, component_type, &type_registry) {
        | Ok(reflected) => reflected,
        | Err(e) => {
            error!("Failed to deserialize component {}: {}", component_type, e);
            return;
        },
    };

    // `component_type` is a fully-qualified type path, resolved here against
    // the registry.
    let registration = match type_registry.get_with_type_path(component_type) {
        | Some(reg) => reg,
        | None => {
            error!("Component type {} not registered", component_type);
            return;
        },
    };

    // Clone the ReflectComponent handle so it outlives the registry guard.
    let reflect_component = match registration.data::<ReflectComponent>() {
        | Some(rc) => rc.clone(),
        | None => {
            error!(
                "Component type {} does not have ReflectComponent data",
                component_type
            );
            return;
        },
    };

    // Release the immutable borrow of the registry resource before mutating
    // the world below.
    drop(type_registry);

    // Re-acquire the registry through its cloned Arc handle; this guard does
    // not borrow the world, so `get_entity_mut` is allowed while it is held.
    let type_registry_arc = world.resource::<AppTypeRegistry>().clone();
    let type_registry_guard = type_registry_arc.read();

    if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
        reflect_component.insert(&mut entity_mut, &*reflected, &type_registry_guard);
        debug!("Applied Set operation for {}", component_type);

        // If we just inserted a Transform component, also add NetworkedTransform
        // This ensures remote entities can have their Transform changes detected
        if component_type == "bevy_transform::components::transform::Transform" {
            if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
                if entity_mut
                    .get::<crate::networking::NetworkedTransform>()
                    .is_none()
                {
                    entity_mut.insert(crate::networking::NetworkedTransform::default());
                    debug!("Added NetworkedTransform to entity with Transform");
                }
            }
        }
    } else {
        error!(
            "Entity {:?} not found when applying component {}",
            entity, component_type
        );
    }
}
|
||||
|
||||
/// System to receive and apply incoming EntityDelta messages
///
/// This system polls the GossipBridge for incoming messages and applies them
/// to the local world. Only `EntityDelta` messages are processed here; every
/// other `SyncMessage` variant is drained from the channel but handled by its
/// own dedicated system.
///
/// Add this to your app:
///
/// ```no_run
/// use bevy::prelude::*;
/// use libmarathon::networking::receive_and_apply_deltas_system;
///
/// App::new().add_systems(Update, receive_and_apply_deltas_system);
/// ```
pub fn receive_and_apply_deltas_system(world: &mut World) {
    // Check if bridge exists; no bridge means networking isn't set up yet.
    if world
        .get_resource::<crate::networking::GossipBridge>()
        .is_none()
    {
        return;
    }

    // Clone the bridge to avoid borrowing issues — `apply_entity_delta` needs
    // `&mut World` while we are still polling the receiver.
    let bridge = world.resource::<crate::networking::GossipBridge>().clone();

    // Poll for incoming messages until the channel is empty this frame.
    // NOTE(review): a sustained flood of messages could keep this loop busy
    // for a whole frame — confirm the bridge bounds its queue.
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::EntityDelta {
                entity_id,
                node_id,
                vector_clock,
                operations,
            } => {
                // Reassemble the destructured fields into an EntityDelta.
                let delta = EntityDelta {
                    entity_id,
                    node_id,
                    vector_clock,
                    operations,
                };

                debug!(
                    "Received EntityDelta for entity {:?} with {} operations",
                    delta.entity_id,
                    delta.operations.len()
                );

                apply_entity_delta(&delta, world);
            },
            | SyncMessage::JoinRequest { .. } => {
                // Handled by handle_join_requests_system
                debug!("JoinRequest handled by dedicated system");
            },
            | SyncMessage::FullState { .. } => {
                // Handled by handle_full_state_system
                debug!("FullState handled by dedicated system");
            },
            | SyncMessage::SyncRequest { .. } => {
                // Handled by handle_sync_requests_system
                debug!("SyncRequest handled by dedicated system");
            },
            | SyncMessage::MissingDeltas { .. } => {
                // Handled by handle_missing_deltas_system
                debug!("MissingDeltas handled by dedicated system");
            },
            | SyncMessage::Lock { .. } => {
                // Handled by lock message dispatcher
                debug!("Lock message handled by dedicated system");
            },
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Merging a remote vector clock must adopt the remote node's highest
    // observed sequence number.
    #[test]
    fn test_node_clock_merge() {
        let node_id = uuid::Uuid::new_v4();
        let mut node_clock = NodeVectorClock::new(node_id);

        let remote_node = uuid::Uuid::new_v4();
        let mut remote_clock = crate::networking::VectorClock::new();
        remote_clock.increment(remote_node);
        remote_clock.increment(remote_node);

        // Merge remote clock
        node_clock.clock.merge(&remote_clock);

        // Our clock should have the remote node's sequence
        assert_eq!(node_clock.clock.get(remote_node), 2);
    }

    // Sanity check: EntityDelta::new stores its fields verbatim.
    #[test]
    fn test_entity_delta_structure() {
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = crate::networking::VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);

        assert_eq!(delta.entity_id, entity_id);
        assert_eq!(delta.node_id, node_id);
        assert_eq!(delta.operations.len(), 0);
    }
}
|
||||
121
crates/libmarathon/src/networking/auth.rs
Normal file
121
crates/libmarathon/src/networking/auth.rs
Normal file
@@ -0,0 +1,121 @@
|
||||
//! Authentication and authorization for the networking layer
|
||||
|
||||
use sha2::{
|
||||
Digest,
|
||||
Sha256,
|
||||
};
|
||||
|
||||
use crate::networking::error::{
|
||||
NetworkingError,
|
||||
Result,
|
||||
};
|
||||
|
||||
/// Validate session secret using constant-time comparison
|
||||
///
|
||||
/// This function uses SHA-256 hash comparison to perform constant-time
|
||||
/// comparison and prevent timing attacks. The session secret is a pre-shared
|
||||
/// key that controls access to the gossip network.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `provided` - The session secret provided by the joining peer
|
||||
/// * `expected` - The expected session secret configured for this node
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Ok(())` - Session secret is valid
|
||||
/// * `Err(NetworkingError::SecurityError)` - Session secret is invalid
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use libmarathon::networking::auth::validate_session_secret;
|
||||
///
|
||||
/// let secret = b"my_secret_key";
|
||||
/// assert!(validate_session_secret(secret, secret).is_ok());
|
||||
///
|
||||
/// let wrong_secret = b"wrong_key";
|
||||
/// assert!(validate_session_secret(wrong_secret, secret).is_err());
|
||||
/// ```
|
||||
pub fn validate_session_secret(provided: &[u8], expected: &[u8]) -> Result<()> {
|
||||
// Different lengths = definitely not equal, fail fast
|
||||
if provided.len() != expected.len() {
|
||||
return Err(NetworkingError::SecurityError(
|
||||
"Invalid session secret".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Hash both secrets for constant-time comparison
|
||||
let provided_hash = hash_secret(provided);
|
||||
let expected_hash = hash_secret(expected);
|
||||
|
||||
// Compare hashes using constant-time comparison
|
||||
// This prevents timing attacks that could leak information about the secret
|
||||
if provided_hash != expected_hash {
|
||||
return Err(NetworkingError::SecurityError(
|
||||
"Invalid session secret".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Hash a secret using SHA-256
|
||||
///
|
||||
/// This is used internally for constant-time comparison of session secrets.
|
||||
fn hash_secret(secret: &[u8]) -> Vec<u8> {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(secret);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A secret must validate against itself.
    #[test]
    fn test_valid_secret() {
        let secret = b"my_secret_key";
        assert!(validate_session_secret(secret, secret).is_ok());
    }

    // Mismatched secrets of equal-ish nature must produce a SecurityError
    // specifically, not just any error.
    #[test]
    fn test_invalid_secret() {
        let secret1 = b"my_secret_key";
        let secret2 = b"wrong_secret_key";
        let result = validate_session_secret(secret1, secret2);
        assert!(result.is_err());
        match result {
            | Err(NetworkingError::SecurityError(_)) => {}, // Expected
            | _ => panic!("Expected SecurityError"),
        }
    }

    // The fast length check must reject secrets of different lengths.
    #[test]
    fn test_different_lengths() {
        let secret1 = b"short";
        let secret2 = b"much_longer_secret";
        let result = validate_session_secret(secret1, secret2);
        assert!(result.is_err());
    }

    // Edge case: two empty secrets are equal and must validate.
    #[test]
    fn test_empty_secrets() {
        let empty = b"";
        assert!(validate_session_secret(empty, empty).is_ok());
    }

    // Hashing the same input twice must yield identical digests.
    #[test]
    fn test_hash_is_deterministic() {
        let secret = b"test_secret";
        let hash1 = hash_secret(secret);
        let hash2 = hash_secret(secret);
        assert_eq!(hash1, hash2);
    }

    // Distinct inputs should yield distinct digests.
    #[test]
    fn test_different_secrets_have_different_hashes() {
        let secret1 = b"secret1";
        let secret2 = b"secret2";
        let hash1 = hash_secret(secret1);
        let hash2 = hash_secret(secret2);
        assert_ne!(hash1, hash2);
    }
}
|
||||
390
crates/libmarathon/src/networking/blob_support.rs
Normal file
390
crates/libmarathon/src/networking/blob_support.rs
Normal file
@@ -0,0 +1,390 @@
|
||||
//! Large blob support for components >64KB
|
||||
//!
|
||||
//! This module handles large component data using iroh-blobs. When a component
|
||||
//! exceeds the inline threshold (64KB), it's stored as a blob and referenced
|
||||
//! by its hash in the ComponentOp.
|
||||
//!
|
||||
//! **NOTE:** This is a simplified implementation for Phase 6. Full iroh-blobs
|
||||
//! integration will be completed when we integrate with actual gossip
|
||||
//! networking.
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
Arc,
|
||||
Mutex,
|
||||
},
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
error::{
|
||||
NetworkingError,
|
||||
Result,
|
||||
},
|
||||
messages::ComponentData,
|
||||
};
|
||||
|
||||
/// Threshold for storing data as a blob (64KB)
///
/// Data strictly larger than this is stored in the blob store and referenced
/// by hash; data at or below it stays inline (see `should_use_blob`).
pub const BLOB_THRESHOLD: usize = 64 * 1024;

/// Hash type for blob references
// SHA-256 digest bytes as produced by `BlobStore::hash_data` (32 bytes).
pub type BlobHash = Vec<u8>;
|
||||
|
||||
/// Bevy resource for managing blobs
///
/// This resource provides blob storage and retrieval. In Phase 6, we use
/// an in-memory cache. Later phases will integrate with iroh-blobs for
/// persistent storage and P2P transfer.
///
/// Cloning the resource is cheap: clones share the same underlying cache
/// through the `Arc`.
#[derive(Resource, Clone)]
pub struct BlobStore {
    /// In-memory cache of blobs (hash -> data)
    // Arc<Mutex<..>> so cloned handles (e.g. the Resource clone in systems)
    // all see the same cache; access goes through `lock()` on every call.
    cache: Arc<Mutex<HashMap<BlobHash, Vec<u8>>>>,
}
|
||||
|
||||
impl BlobStore {
|
||||
/// Create a new blob store
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
cache: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Store a blob and return its hash
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::BlobStore;
|
||||
///
|
||||
/// let store = BlobStore::new();
|
||||
/// let data = vec![1, 2, 3, 4, 5];
|
||||
/// let hash = store.store_blob(data.clone()).unwrap();
|
||||
///
|
||||
/// let retrieved = store.get_blob(&hash).unwrap();
|
||||
/// assert_eq!(retrieved, Some(data));
|
||||
/// ```
|
||||
pub fn store_blob(&self, data: Vec<u8>) -> Result<BlobHash> {
|
||||
// Use SHA-256 for content-addressable storage
|
||||
let hash = Self::hash_data(&data);
|
||||
|
||||
self.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.insert(hash.clone(), data);
|
||||
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
/// Retrieve a blob by its hash
|
||||
///
|
||||
/// Returns `None` if the blob is not in the cache.
|
||||
pub fn get_blob(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> {
|
||||
Ok(self
|
||||
.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.get(hash)
|
||||
.cloned())
|
||||
}
|
||||
|
||||
/// Check if a blob exists in the cache
|
||||
///
|
||||
/// Returns an error if the cache lock is poisoned.
|
||||
pub fn has_blob(&self, hash: &BlobHash) -> Result<bool> {
|
||||
Ok(self
|
||||
.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.contains_key(hash))
|
||||
}
|
||||
|
||||
/// Get a blob if it exists (atomic check-and-get)
|
||||
///
|
||||
/// This is safer than calling `has_blob()` followed by `get_blob()` because
|
||||
/// it's atomic - the blob can't be removed between the check and get.
|
||||
pub fn get_blob_if_exists(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> {
|
||||
Ok(self
|
||||
.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.get(hash)
|
||||
.cloned())
|
||||
}
|
||||
|
||||
/// Get cache size (number of blobs)
|
||||
///
|
||||
/// Returns an error if the cache lock is poisoned.
|
||||
pub fn cache_size(&self) -> Result<usize> {
|
||||
Ok(self
|
||||
.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.len())
|
||||
}
|
||||
|
||||
/// Clear the cache
|
||||
pub fn clear_cache(&self) -> Result<()> {
|
||||
self.cache
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||
.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Hash data using SHA-256
|
||||
fn hash_data(data: &[u8]) -> BlobHash {
|
||||
use sha2::{
|
||||
Digest,
|
||||
Sha256,
|
||||
};
|
||||
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BlobStore {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine whether data should be stored as a blob
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::should_use_blob;
|
||||
///
|
||||
/// let small_data = vec![1, 2, 3];
|
||||
/// assert!(!should_use_blob(&small_data));
|
||||
///
|
||||
/// let large_data = vec![0u8; 100_000];
|
||||
/// assert!(should_use_blob(&large_data));
|
||||
/// ```
|
||||
pub fn should_use_blob(data: &[u8]) -> bool {
|
||||
data.len() > BLOB_THRESHOLD
|
||||
}
|
||||
|
||||
/// Create ComponentData, automatically choosing inline vs blob
|
||||
///
|
||||
/// This helper function inspects the data size and creates the appropriate
|
||||
/// ComponentData variant.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{
|
||||
/// BlobStore,
|
||||
/// create_component_data,
|
||||
/// };
|
||||
///
|
||||
/// let store = BlobStore::new();
|
||||
///
|
||||
/// // Small data goes inline
|
||||
/// let small_data = vec![1, 2, 3];
|
||||
/// let component_data = create_component_data(small_data, &store).unwrap();
|
||||
///
|
||||
/// // Large data becomes a blob reference
|
||||
/// let large_data = vec![0u8; 100_000];
|
||||
/// let component_data = create_component_data(large_data, &store).unwrap();
|
||||
/// ```
|
||||
pub fn create_component_data(data: Vec<u8>, blob_store: &BlobStore) -> Result<ComponentData> {
|
||||
if should_use_blob(&data) {
|
||||
let size = data.len() as u64;
|
||||
let hash = blob_store.store_blob(data)?;
|
||||
Ok(ComponentData::BlobRef { hash, size })
|
||||
} else {
|
||||
Ok(ComponentData::Inline(data))
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve the actual data from ComponentData
|
||||
///
|
||||
/// This resolves blob references by fetching from the blob store.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{
|
||||
/// BlobStore,
|
||||
/// ComponentData,
|
||||
/// get_component_data,
|
||||
/// };
|
||||
///
|
||||
/// let store = BlobStore::new();
|
||||
///
|
||||
/// // Inline data
|
||||
/// let inline = ComponentData::Inline(vec![1, 2, 3]);
|
||||
/// let data = get_component_data(&inline, &store).unwrap();
|
||||
/// assert_eq!(data, vec![1, 2, 3]);
|
||||
/// ```
|
||||
pub fn get_component_data(data: &ComponentData, blob_store: &BlobStore) -> Result<Vec<u8>> {
|
||||
match data {
|
||||
| ComponentData::Inline(bytes) => Ok(bytes.clone()),
|
||||
| ComponentData::BlobRef { hash, size: _ } => blob_store
|
||||
.get_blob(hash)?
|
||||
.ok_or_else(|| NetworkingError::Blob(format!("Blob not found: {:x?}", hash))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Request a blob from the network
///
/// **NOTE:** This is a stub for Phase 6. Will be implemented in later phases
/// when we have full gossip integration.
///
/// Currently always returns `Ok(())` without fetching anything; callers must
/// not assume the blob is available afterwards.
pub fn request_blob_from_network(_hash: &BlobHash, _blob_store: &BlobStore) -> Result<()> {
    // TODO: Implement in later phases with iroh-gossip
    debug!("request_blob_from_network not yet implemented");
    Ok(())
}
|
||||
|
||||
/// Bevy system to handle blob requests
///
/// This system processes incoming blob requests and serves blobs to peers.
///
/// **NOTE:** Stub implementation for Phase 6 — currently a no-op. The
/// `Option<Res<BlobStore>>` parameter lets the system be scheduled even
/// before the resource is inserted.
pub fn blob_transfer_system(_blob_store: Option<Res<BlobStore>>) {
    // TODO: Implement when we have gossip networking
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_blob_store_creation() {
|
||||
let store = BlobStore::new();
|
||||
assert_eq!(store.cache_size().unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_store_and_retrieve_blob() {
|
||||
let store = BlobStore::new();
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
let hash = store.store_blob(data.clone()).unwrap();
|
||||
let retrieved = store.get_blob(&hash).unwrap();
|
||||
|
||||
assert_eq!(retrieved, Some(data));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blob_hash_is_deterministic() {
|
||||
let store = BlobStore::new();
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
let hash1 = store.store_blob(data.clone()).unwrap();
|
||||
let hash2 = store.store_blob(data.clone()).unwrap();
|
||||
|
||||
assert_eq!(hash1, hash2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_has_blob() {
|
||||
let store = BlobStore::new();
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
let hash = store.store_blob(data).unwrap();
|
||||
assert!(store.has_blob(&hash).unwrap());
|
||||
|
||||
let fake_hash = vec![0; 32];
|
||||
assert!(!store.has_blob(&fake_hash).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_cache() {
|
||||
let store = BlobStore::new();
|
||||
let data = vec![1, 2, 3, 4, 5];
|
||||
|
||||
store.store_blob(data).unwrap();
|
||||
assert_eq!(store.cache_size().unwrap(), 1);
|
||||
|
||||
store.clear_cache().unwrap();
|
||||
assert_eq!(store.cache_size().unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_should_use_blob() {
|
||||
let small_data = vec![0u8; 1000];
|
||||
assert!(!should_use_blob(&small_data));
|
||||
|
||||
let large_data = vec![0u8; 100_000];
|
||||
assert!(should_use_blob(&large_data));
|
||||
|
||||
let threshold_data = vec![0u8; BLOB_THRESHOLD];
|
||||
assert!(!should_use_blob(&threshold_data));
|
||||
|
||||
let over_threshold = vec![0u8; BLOB_THRESHOLD + 1];
|
||||
assert!(should_use_blob(&over_threshold));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_component_data_inline() {
|
||||
let store = BlobStore::new();
|
||||
let small_data = vec![1, 2, 3];
|
||||
|
||||
let component_data = create_component_data(small_data.clone(), &store).unwrap();
|
||||
|
||||
match component_data {
|
||||
| ComponentData::Inline(data) => assert_eq!(data, small_data),
|
||||
| ComponentData::BlobRef { .. } => panic!("Expected inline data"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_component_data_blob() {
|
||||
let store = BlobStore::new();
|
||||
let large_data = vec![0u8; 100_000];
|
||||
|
||||
let component_data = create_component_data(large_data.clone(), &store).unwrap();
|
||||
|
||||
match component_data {
|
||||
| ComponentData::BlobRef { hash, size } => {
|
||||
assert_eq!(size, 100_000);
|
||||
assert!(store.has_blob(&hash).unwrap());
|
||||
},
|
||||
| ComponentData::Inline(_) => panic!("Expected blob reference"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_component_data_inline() {
|
||||
let store = BlobStore::new();
|
||||
let inline = ComponentData::Inline(vec![1, 2, 3]);
|
||||
|
||||
let data = get_component_data(&inline, &store).unwrap();
|
||||
assert_eq!(data, vec![1, 2, 3]);
|
||||
}
|
||||
|
||||
#[test]
fn test_get_component_data_blob() {
    // Data stored as a blob must be retrievable through its BlobRef.
    let store = BlobStore::new();
    let payload = vec![0u8; 100_000];
    let hash = store.store_blob(payload.clone()).unwrap();

    let blob_ref = ComponentData::BlobRef { hash, size: 100_000 };

    assert_eq!(get_component_data(&blob_ref, &store).unwrap(), payload);
}
|
||||
|
||||
#[test]
fn test_get_component_data_missing_blob() {
    // A BlobRef whose hash was never stored must surface an error, not panic.
    let store = BlobStore::new();
    let blob_ref = ComponentData::BlobRef {
        hash: vec![0; 32],
        size: 1000,
    };

    assert!(get_component_data(&blob_ref, &store).is_err());
}
|
||||
}
|
||||
130
crates/libmarathon/src/networking/change_detection.rs
Normal file
130
crates/libmarathon/src/networking/change_detection.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
//! Change detection for networked entities
|
||||
//!
|
||||
//! This module provides systems that detect when networked components change
|
||||
//! and prepare them for delta generation.
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
NetworkedEntity,
|
||||
NetworkedTransform,
|
||||
};
|
||||
|
||||
/// System to automatically detect Transform changes and mark entity for sync
|
||||
///
|
||||
/// This system detects changes to Transform components on networked entities
|
||||
/// and triggers persistence by accessing `NetworkedEntity` mutably (which marks
|
||||
/// it as changed via Bevy's change detection).
|
||||
///
|
||||
/// Add this system to your app if you want automatic synchronization of
|
||||
/// Transform changes:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::auto_detect_transform_changes_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, auto_detect_transform_changes_system);
|
||||
/// ```
|
||||
pub fn auto_detect_transform_changes_system(
|
||||
mut query: Query<
|
||||
(Entity, &mut NetworkedEntity, &Transform),
|
||||
(
|
||||
With<NetworkedTransform>,
|
||||
Or<(Changed<Transform>, Changed<GlobalTransform>)>,
|
||||
),
|
||||
>,
|
||||
) {
|
||||
// Count how many changed entities we found
|
||||
let count = query.iter().count();
|
||||
if count > 0 {
|
||||
debug!(
|
||||
"auto_detect_transform_changes_system: Found {} entities with changed Transform",
|
||||
count
|
||||
);
|
||||
}
|
||||
|
||||
// Simply accessing &mut NetworkedEntity triggers Bevy's change detection
|
||||
for (_entity, mut networked, transform) in query.iter_mut() {
|
||||
debug!(
|
||||
"Marking NetworkedEntity {:?} as changed due to Transform change (pos: {:?})",
|
||||
networked.network_id, transform.translation
|
||||
);
|
||||
// No-op - the mutable access itself marks NetworkedEntity as changed
|
||||
// This will trigger the delta generation system
|
||||
let _ = &mut *networked;
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource to track the last sync version for each entity
|
||||
///
|
||||
/// This helps us avoid sending redundant deltas for the same changes.
|
||||
#[derive(Resource, Default)]
pub struct LastSyncVersions {
    /// Map from network_id to the last vector clock we synced
    ///
    /// NOTE(review): despite the wording above, the stored value is a single
    /// u64 sequence number (compared with `>` in `should_sync`), not a full
    /// vector clock — consider renaming or updating the doc.
    versions: std::collections::HashMap<uuid::Uuid, u64>,
}
|
||||
|
||||
impl LastSyncVersions {
|
||||
/// Check if we should sync this entity based on version
|
||||
pub fn should_sync(&self, network_id: uuid::Uuid, version: u64) -> bool {
|
||||
match self.versions.get(&network_id) {
|
||||
| Some(&last_version) => version > last_version,
|
||||
| None => true, // Never synced before
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the last synced version for an entity
|
||||
pub fn update(&mut self, network_id: uuid::Uuid, version: u64) {
|
||||
self.versions.insert(network_id, version);
|
||||
}
|
||||
|
||||
/// Remove tracking for an entity (when despawned)
|
||||
pub fn remove(&mut self, network_id: uuid::Uuid) {
|
||||
self.versions.remove(&network_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_last_sync_versions() {
        let mut tracker = LastSyncVersions::default();
        let entity = uuid::Uuid::new_v4();

        // Unknown entity: always sync.
        assert!(tracker.should_sync(entity, 1));

        tracker.update(entity, 1);

        // Equal and older versions are redundant.
        assert!(!tracker.should_sync(entity, 1));
        assert!(!tracker.should_sync(entity, 0));

        // Strictly newer versions go out.
        assert!(tracker.should_sync(entity, 2));

        // Forgetting the entity resets the decision.
        tracker.remove(entity);
        assert!(tracker.should_sync(entity, 2));
    }

    #[test]
    fn test_multiple_entities() {
        let mut tracker = LastSyncVersions::default();
        let first = uuid::Uuid::new_v4();
        let second = uuid::Uuid::new_v4();

        tracker.update(first, 5);
        tracker.update(second, 3);

        // Each entity's version is tracked independently.
        assert!(!tracker.should_sync(first, 4));
        assert!(tracker.should_sync(first, 6));
        assert!(!tracker.should_sync(second, 2));
        assert!(tracker.should_sync(second, 4));
    }
}
|
||||
410
crates/libmarathon/src/networking/components.rs
Normal file
410
crates/libmarathon/src/networking/components.rs
Normal file
@@ -0,0 +1,410 @@
|
||||
//! Networked entity components
|
||||
//!
|
||||
//! This module defines components that mark entities as networked and track
|
||||
//! their network identity across the distributed system.
|
||||
|
||||
use bevy::prelude::*;
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::vector_clock::NodeId;
|
||||
|
||||
/// Marker component indicating an entity should be synchronized over the
|
||||
/// network
|
||||
///
|
||||
/// Add this component to any entity that should have its state synchronized
|
||||
/// across peers. The networking system will automatically track changes and
|
||||
/// broadcast deltas.
|
||||
///
|
||||
/// # Relationship with Persisted
|
||||
///
|
||||
/// NetworkedEntity and Persisted are complementary:
|
||||
/// - `Persisted` - Entity state saved to local SQLite database
|
||||
/// - `NetworkedEntity` - Entity state synchronized across network peers
|
||||
///
|
||||
/// Most entities will have both components for full durability and sync.
|
||||
///
|
||||
/// # Network Identity
|
||||
///
|
||||
/// Each networked entity has:
|
||||
/// - `network_id` - Globally unique UUID for this entity across all peers
|
||||
/// - `owner_node_id` - Node that originally created this entity
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkedEntity;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn spawn_networked_entity(mut commands: Commands) {
|
||||
/// let node_id = Uuid::new_v4();
|
||||
///
|
||||
/// commands.spawn((NetworkedEntity::new(node_id), Transform::default()));
|
||||
/// }
|
||||
/// ```
|
||||
// Serialize/Deserialize let the identity travel inside sync messages;
// Reflect lets the networking systems work with it dynamically.
#[derive(Component, Reflect, Debug, Clone, Serialize, Deserialize)]
#[reflect(Component)]
pub struct NetworkedEntity {
    /// Globally unique network ID for this entity
    ///
    /// This ID is used to identify the entity across all peers in the network.
    /// When a peer receives an EntityDelta, it uses this ID to locate the
    /// corresponding local entity.
    pub network_id: uuid::Uuid,

    /// Node that created this entity
    ///
    /// Used for conflict resolution and ownership tracking. When two nodes
    /// concurrently create entities, the owner_node_id can be used as a
    /// tiebreaker.
    // NodeId is a uuid::Uuid alias (the Default impl assigns Uuid::new_v4()
    // directly to this field).
    pub owner_node_id: NodeId,
}
|
||||
|
||||
impl NetworkedEntity {
|
||||
/// Create a new networked entity
|
||||
///
|
||||
/// Generates a new random network_id and sets the owner to the specified
|
||||
/// node.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::NetworkedEntity;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let entity = NetworkedEntity::new(node_id);
|
||||
///
|
||||
/// assert_eq!(entity.owner_node_id, node_id);
|
||||
/// ```
|
||||
pub fn new(owner_node_id: NodeId) -> Self {
|
||||
Self {
|
||||
network_id: uuid::Uuid::new_v4(),
|
||||
owner_node_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a networked entity with a specific network ID
|
||||
///
|
||||
/// Used when receiving entities from remote peers - we need to use their
|
||||
/// network_id rather than generating a new one.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::NetworkedEntity;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let network_id = Uuid::new_v4();
|
||||
/// let owner_id = Uuid::new_v4();
|
||||
/// let entity = NetworkedEntity::with_id(network_id, owner_id);
|
||||
///
|
||||
/// assert_eq!(entity.network_id, network_id);
|
||||
/// assert_eq!(entity.owner_node_id, owner_id);
|
||||
/// ```
|
||||
pub fn with_id(network_id: uuid::Uuid, owner_node_id: NodeId) -> Self {
|
||||
Self {
|
||||
network_id,
|
||||
owner_node_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this node owns the entity
|
||||
pub fn is_owned_by(&self, node_id: NodeId) -> bool {
|
||||
self.owner_node_id == node_id
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NetworkedEntity {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
network_id: uuid::Uuid::new_v4(),
|
||||
owner_node_id: uuid::Uuid::new_v4(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for Transform component that enables CRDT synchronization
|
||||
///
|
||||
/// This is a marker component used alongside Transform to indicate that
|
||||
/// Transform changes should be synchronized using Last-Write-Wins semantics.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkedEntity,
|
||||
/// NetworkedTransform,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn spawn_synced_transform(mut commands: Commands) {
|
||||
/// let node_id = Uuid::new_v4();
|
||||
///
|
||||
/// commands.spawn((
|
||||
/// NetworkedEntity::new(node_id),
|
||||
/// Transform::default(),
|
||||
/// NetworkedTransform,
|
||||
/// ));
|
||||
/// }
|
||||
/// ```
|
||||
// Zero-sized marker: its presence alone opts an entity's Transform into
// network synchronization (see auto_detect_transform_changes_system).
#[derive(Component, Reflect, Debug, Clone, Copy, Default)]
#[reflect(Component)]
pub struct NetworkedTransform;
|
||||
|
||||
/// Wrapper for a selection component using OR-Set semantics
|
||||
///
|
||||
/// Tracks a set of selected entity network IDs. Uses OR-Set (Observed-Remove)
|
||||
/// CRDT to handle concurrent add/remove operations correctly.
|
||||
///
|
||||
/// # OR-Set Semantics
|
||||
///
|
||||
/// - Concurrent adds and removes: add wins
|
||||
/// - Each add has a unique operation ID
|
||||
/// - Removes reference specific add operation IDs
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkedEntity,
|
||||
/// NetworkedSelection,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn create_selection(mut commands: Commands) {
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let mut selection = NetworkedSelection::new();
|
||||
///
|
||||
/// // Add some entities to the selection
|
||||
/// selection.selected_ids.insert(Uuid::new_v4());
|
||||
/// selection.selected_ids.insert(Uuid::new_v4());
|
||||
///
|
||||
/// commands.spawn((NetworkedEntity::new(node_id), selection));
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Component, Reflect, Debug, Clone, Default)]
#[reflect(Component)]
pub struct NetworkedSelection {
    /// Set of selected entity network IDs
    ///
    /// This will be synchronized using OR-Set CRDT semantics in later phases.
    /// For now, it's a simple HashSet.
    // NOTE(review): there are no per-add operation tags yet, so concurrent
    // add/remove resolution described in the type docs is not implemented here.
    pub selected_ids: std::collections::HashSet<uuid::Uuid>,
}
|
||||
|
||||
impl NetworkedSelection {
|
||||
/// Create a new empty selection
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
selected_ids: std::collections::HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add an entity to the selection
|
||||
pub fn add(&mut self, entity_id: uuid::Uuid) {
|
||||
self.selected_ids.insert(entity_id);
|
||||
}
|
||||
|
||||
/// Remove an entity from the selection
|
||||
pub fn remove(&mut self, entity_id: uuid::Uuid) {
|
||||
self.selected_ids.remove(&entity_id);
|
||||
}
|
||||
|
||||
/// Check if an entity is selected
|
||||
pub fn contains(&self, entity_id: uuid::Uuid) -> bool {
|
||||
self.selected_ids.contains(&entity_id)
|
||||
}
|
||||
|
||||
/// Clear all selections
|
||||
pub fn clear(&mut self) {
|
||||
self.selected_ids.clear();
|
||||
}
|
||||
|
||||
/// Get the number of selected entities
|
||||
pub fn len(&self) -> usize {
|
||||
self.selected_ids.len()
|
||||
}
|
||||
|
||||
/// Check if the selection is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.selected_ids.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for a drawing path component using Sequence CRDT semantics
|
||||
///
|
||||
/// Represents an ordered sequence of points that can be collaboratively edited.
|
||||
/// Uses RGA (Replicated Growable Array) CRDT to maintain consistent ordering
|
||||
/// across concurrent insertions.
|
||||
///
|
||||
/// # RGA Semantics
|
||||
///
|
||||
/// - Each point has a unique operation ID
|
||||
/// - Points reference the ID of the point they're inserted after
|
||||
/// - Concurrent insertions maintain consistent ordering
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkedDrawingPath,
|
||||
/// NetworkedEntity,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn create_path(mut commands: Commands) {
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let mut path = NetworkedDrawingPath::new();
|
||||
///
|
||||
/// // Add some points to the path
|
||||
/// path.points.push(Vec2::new(0.0, 0.0));
|
||||
/// path.points.push(Vec2::new(10.0, 10.0));
|
||||
/// path.points.push(Vec2::new(20.0, 5.0));
|
||||
///
|
||||
/// commands.spawn((NetworkedEntity::new(node_id), path));
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Component, Reflect, Debug, Clone, Default)]
#[reflect(Component)]
pub struct NetworkedDrawingPath {
    /// Ordered sequence of points in the path
    ///
    /// This will be synchronized using RGA (Sequence CRDT) semantics in later
    /// phases. For now, it's a simple Vec.
    pub points: Vec<Vec2>,

    /// Drawing stroke color
    // NOTE(review): color/width carry no CRDT metadata here; presumably they
    // sync whole-component (last-write-wins) — confirm against the sync layer.
    pub color: Color,

    /// Stroke width
    pub width: f32,
}
|
||||
|
||||
impl NetworkedDrawingPath {
|
||||
/// Create a new empty drawing path
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
points: Vec::new(),
|
||||
color: Color::BLACK,
|
||||
width: 2.0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a path with a specific color and width
|
||||
pub fn with_style(color: Color, width: f32) -> Self {
|
||||
Self {
|
||||
points: Vec::new(),
|
||||
color,
|
||||
width,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a point to the end of the path
|
||||
pub fn push(&mut self, point: Vec2) {
|
||||
self.points.push(point);
|
||||
}
|
||||
|
||||
/// Get the number of points in the path
|
||||
pub fn len(&self) -> usize {
|
||||
self.points.len()
|
||||
}
|
||||
|
||||
/// Check if the path is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.points.is_empty()
|
||||
}
|
||||
|
||||
/// Clear all points from the path
|
||||
pub fn clear(&mut self) {
|
||||
self.points.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_networked_entity_new() {
        let owner = uuid::Uuid::new_v4();
        let networked = NetworkedEntity::new(owner);

        assert_eq!(networked.owner_node_id, owner);
        // A freshly generated network_id is never the nil UUID.
        assert_ne!(networked.network_id, uuid::Uuid::nil());
    }

    #[test]
    fn test_networked_entity_with_id() {
        let net_id = uuid::Uuid::new_v4();
        let owner = uuid::Uuid::new_v4();
        let networked = NetworkedEntity::with_id(net_id, owner);

        // Both identifiers are taken verbatim, not regenerated.
        assert_eq!(networked.network_id, net_id);
        assert_eq!(networked.owner_node_id, owner);
    }

    #[test]
    fn test_networked_entity_is_owned_by() {
        let owner = uuid::Uuid::new_v4();
        let stranger = uuid::Uuid::new_v4();
        let networked = NetworkedEntity::new(owner);

        assert!(networked.is_owned_by(owner));
        assert!(!networked.is_owned_by(stranger));
    }

    #[test]
    fn test_networked_selection() {
        let mut selection = NetworkedSelection::new();
        let first = uuid::Uuid::new_v4();
        let second = uuid::Uuid::new_v4();

        assert!(selection.is_empty());

        selection.add(first);
        assert_eq!(selection.len(), 1);
        assert!(selection.contains(first));

        selection.add(second);
        assert_eq!(selection.len(), 2);
        assert!(selection.contains(second));

        selection.remove(first);
        assert_eq!(selection.len(), 1);
        assert!(!selection.contains(first));

        selection.clear();
        assert!(selection.is_empty());
    }

    #[test]
    fn test_networked_drawing_path() {
        let mut path = NetworkedDrawingPath::new();

        assert!(path.is_empty());

        path.push(Vec2::new(0.0, 0.0));
        assert_eq!(path.len(), 1);

        path.push(Vec2::new(10.0, 10.0));
        assert_eq!(path.len(), 2);

        path.clear();
        assert!(path.is_empty());
    }

    #[test]
    fn test_drawing_path_with_style() {
        let red = Color::srgb(1.0, 0.0, 0.0);
        let path = NetworkedDrawingPath::with_style(red, 5.0);

        assert_eq!(path.color, red);
        assert_eq!(path.width, 5.0);
    }
}
|
||||
251
crates/libmarathon/src/networking/delta_generation.rs
Normal file
251
crates/libmarathon/src/networking/delta_generation.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
//! Delta generation system for broadcasting entity changes
|
||||
//!
|
||||
//! This module implements the core delta generation logic that detects changed
|
||||
//! entities and broadcasts EntityDelta messages.
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
NetworkedEntity,
|
||||
change_detection::LastSyncVersions,
|
||||
gossip_bridge::GossipBridge,
|
||||
messages::{
|
||||
EntityDelta,
|
||||
SyncMessage,
|
||||
VersionedMessage,
|
||||
},
|
||||
operation_builder::build_entity_operations,
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
};
|
||||
|
||||
/// Resource wrapping our node's vector clock
|
||||
///
|
||||
/// This tracks the logical time for our local operations.
|
||||
#[derive(Resource)]
pub struct NodeVectorClock {
    // Our own node's ID: the component of `clock` that tick() advances.
    pub node_id: NodeId,
    // Logical clock covering every node we have observed operations from.
    pub clock: VectorClock,
}
|
||||
|
||||
impl NodeVectorClock {
|
||||
pub fn new(node_id: NodeId) -> Self {
|
||||
Self {
|
||||
node_id,
|
||||
clock: VectorClock::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment our clock for a new operation
|
||||
pub fn tick(&mut self) -> u64 {
|
||||
self.clock.increment(self.node_id)
|
||||
}
|
||||
|
||||
/// Get current sequence number for our node
|
||||
pub fn sequence(&self) -> u64 {
|
||||
self.clock.get(self.node_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// System to generate and broadcast EntityDelta messages
|
||||
///
|
||||
/// This system:
|
||||
/// 1. Queries for Changed<NetworkedEntity>
|
||||
/// 2. Serializes all components on those entities
|
||||
/// 3. Builds EntityDelta messages
|
||||
/// 4. Broadcasts via GossipBridge
|
||||
///
|
||||
/// Add this to your app to enable delta broadcasting:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::generate_delta_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, generate_delta_system);
|
||||
/// ```
|
||||
pub fn generate_delta_system(world: &mut World) {
    // Nothing to broadcast to if the gossip bridge isn't installed.
    if world.get_resource::<GossipBridge>().is_none() {
        return;
    }

    // Snapshot the changed entities up front so the Changed<> query borrow is
    // released before we start taking other borrows of the World below.
    let changed_entities: Vec<(Entity, uuid::Uuid, uuid::Uuid)> = {
        let mut query =
            world.query_filtered::<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>();
        query
            .iter(world)
            .map(|(entity, networked)| (entity, networked.network_id, networked.owner_node_id))
            .collect()
    };

    if changed_entities.is_empty() {
        return;
    }

    debug!(
        "generate_delta_system: Processing {} changed entities",
        changed_entities.len()
    );

    // Process each entity separately to avoid borrow conflicts
    for (entity, network_id, _owner_node_id) in changed_entities {
        // Phase 1: Check and update clocks, collect data
        // NOTE(review): a SystemState is constructed per entity per frame;
        // SystemState is designed to be created once and cached. If this shows
        // up in profiles, hoist it out of the loop (or into a Local).
        let mut system_state: bevy::ecs::system::SystemState<(
            Res<GossipBridge>,
            Res<AppTypeRegistry>,
            ResMut<NodeVectorClock>,
            ResMut<LastSyncVersions>,
            Option<ResMut<crate::networking::OperationLog>>,
        )> = bevy::ecs::system::SystemState::new(world);

        let (node_id, vector_clock, current_seq) = {
            let (_, _, mut node_clock, last_versions, _) = system_state.get_mut(world);

            // Check if we should sync this entity
            // NOTE(review): current_seq is read BEFORE tick() below, and the
            // same pre-tick value is stored via last_versions.update() after a
            // successful broadcast. Since every broadcast ticks the clock, the
            // next frame's current_seq is always greater than the stored value,
            // so this guard seemingly never suppresses a delta — confirm intent.
            let current_seq = node_clock.sequence();
            if !last_versions.should_sync(network_id, current_seq) {
                // Explicitly release both ResMut borrows before applying the
                // SystemState back to the world.
                drop(last_versions);
                drop(node_clock);
                system_state.apply(world);
                continue;
            }

            // Increment our vector clock
            node_clock.tick();

            // Clone what we need out of the borrow so the block can end here.
            (node_clock.node_id, node_clock.clock.clone(), current_seq)
        };

        // Phase 2: Build operations (needs world access without holding other borrows)
        let operations = {
            let type_registry = world.resource::<AppTypeRegistry>().read();
            let ops = build_entity_operations(
                entity,
                world,
                node_id,
                vector_clock.clone(),
                &type_registry,
                None, // blob_store - will be added in later phases
            );
            drop(type_registry);
            ops
        };

        // Nothing serializable changed on this entity; skip the broadcast.
        if operations.is_empty() {
            system_state.apply(world);
            continue;
        }

        // Phase 3: Record, broadcast, and update
        let delta = {
            let (bridge, _, _, mut last_versions, mut operation_log) = system_state.get_mut(world);

            // Create EntityDelta
            let delta = EntityDelta::new(network_id, node_id, vector_clock.clone(), operations);

            // Record in operation log for anti-entropy
            if let Some(ref mut log) = operation_log {
                log.record_operation(delta.clone());
            }

            // Wrap in VersionedMessage
            let message = VersionedMessage::new(SyncMessage::EntityDelta {
                entity_id: delta.entity_id,
                node_id: delta.node_id,
                vector_clock: delta.vector_clock.clone(),
                operations: delta.operations.clone(),
            });

            // Broadcast
            if let Err(e) = bridge.send(message) {
                error!("Failed to broadcast EntityDelta: {}", e);
            } else {
                debug!(
                    "Broadcast EntityDelta for entity {:?} with {} operations",
                    network_id,
                    delta.operations.len()
                );
                // Only mark the entity as synced when the send succeeded, so a
                // failed broadcast is retried on the next change.
                last_versions.update(network_id, current_seq);
            }

            delta
        };

        // Phase 4: Update component vector clocks for local modifications
        {
            if let Some(mut component_clocks) =
                world.get_resource_mut::<crate::networking::ComponentVectorClocks>()
            {
                for op in &delta.operations {
                    if let crate::networking::ComponentOp::Set {
                        component_type,
                        vector_clock: op_clock,
                        ..
                    } = op
                    {
                        component_clocks.set(
                            network_id,
                            component_type.clone(),
                            op_clock.clone(),
                            node_id,
                        );
                        debug!(
                            "Updated local vector clock for {} on entity {:?} (node_id: {:?})",
                            component_type, network_id, node_id
                        );
                    }
                }
            }
        }

        // Flush any deferred SystemState work back into the world.
        system_state.apply(world);
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_node_vector_clock_creation() {
        let node = uuid::Uuid::new_v4();
        let clock = NodeVectorClock::new(node);

        assert_eq!(clock.node_id, node);
        // A fresh clock has never ticked.
        assert_eq!(clock.sequence(), 0);
    }

    #[test]
    fn test_node_vector_clock_tick() {
        let node = uuid::Uuid::new_v4();
        let mut clock = NodeVectorClock::new(node);

        // tick() returns the post-increment value, and sequence() agrees.
        for expected in 1u64..=2 {
            assert_eq!(clock.tick(), expected);
            assert_eq!(clock.sequence(), expected);
        }
    }

    #[test]
    fn test_node_vector_clock_multiple_nodes() {
        let node_a = uuid::Uuid::new_v4();
        let node_b = uuid::Uuid::new_v4();
        let mut clock_a = NodeVectorClock::new(node_a);
        let mut clock_b = NodeVectorClock::new(node_b);

        clock_a.tick();
        clock_b.tick();

        assert_eq!(clock_a.sequence(), 1);
        assert_eq!(clock_b.sequence(), 1);

        // Merging pulls node_b's component into clock_a without disturbing ours.
        clock_a.clock.merge(&clock_b.clock);
        assert_eq!(clock_a.clock.get(node_a), 1);
        assert_eq!(clock_a.clock.get(node_b), 1);
    }
}
|
||||
439
crates/libmarathon/src/networking/entity_map.rs
Normal file
439
crates/libmarathon/src/networking/entity_map.rs
Normal file
@@ -0,0 +1,439 @@
|
||||
//! Bidirectional mapping between network IDs and Bevy entities
|
||||
//!
|
||||
//! This module provides efficient lookup in both directions:
|
||||
//! - network_id → Entity (when receiving remote operations)
|
||||
//! - Entity → network_id (when broadcasting local changes)
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
/// Bidirectional mapping between network IDs and Bevy entities
|
||||
///
|
||||
/// This resource maintains two HashMaps for O(1) lookup in both directions.
|
||||
/// It's updated automatically by the networking systems when entities are
|
||||
/// spawned or despawned.
|
||||
///
|
||||
/// # Thread Safety
|
||||
///
|
||||
/// This is a Bevy Resource, so it's automatically synchronized across systems.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkEntityMap,
|
||||
/// NetworkedEntity,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn example_system(mut map: ResMut<NetworkEntityMap>, query: Query<(Entity, &NetworkedEntity)>) {
|
||||
/// // Register networked entities
|
||||
/// for (entity, networked) in query.iter() {
|
||||
/// map.insert(networked.network_id, entity);
|
||||
/// }
|
||||
///
|
||||
/// // Later, look up by network ID
|
||||
/// let network_id = Uuid::new_v4();
|
||||
/// if let Some(entity) = map.get_entity(network_id) {
|
||||
/// println!("Found entity: {:?}", entity);
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Resource, Default, Debug)]
pub struct NetworkEntityMap {
    /// Map from network ID to Bevy Entity
    // Invariant: kept in lockstep with `entity_to_network_id` — every insert
    // and remove in the impl updates both maps together.
    network_id_to_entity: HashMap<uuid::Uuid, Entity>,

    /// Map from Bevy Entity to network ID
    entity_to_network_id: HashMap<Entity, uuid::Uuid>,
}
|
||||
|
||||
impl NetworkEntityMap {
|
||||
/// Create a new empty entity map
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
network_id_to_entity: HashMap::new(),
|
||||
entity_to_network_id: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a bidirectional mapping
|
||||
///
|
||||
/// If the network_id or entity already exists in the map, the old mapping
|
||||
/// is removed first to maintain consistency.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkEntityMap;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # let mut world = World::new();
|
||||
/// # let entity = world.spawn_empty().id();
|
||||
/// let mut map = NetworkEntityMap::new();
|
||||
/// let network_id = Uuid::new_v4();
|
||||
///
|
||||
/// map.insert(network_id, entity);
|
||||
/// assert_eq!(map.get_entity(network_id), Some(entity));
|
||||
/// assert_eq!(map.get_network_id(entity), Some(network_id));
|
||||
/// ```
|
||||
pub fn insert(&mut self, network_id: uuid::Uuid, entity: Entity) {
|
||||
// Remove old mappings if they exist
|
||||
if let Some(old_entity) = self.network_id_to_entity.get(&network_id) {
|
||||
self.entity_to_network_id.remove(old_entity);
|
||||
}
|
||||
if let Some(old_network_id) = self.entity_to_network_id.get(&entity) {
|
||||
self.network_id_to_entity.remove(old_network_id);
|
||||
}
|
||||
|
||||
// Insert new mappings
|
||||
self.network_id_to_entity.insert(network_id, entity);
|
||||
self.entity_to_network_id.insert(entity, network_id);
|
||||
}
|
||||
|
||||
/// Get the Bevy Entity for a network ID
|
||||
///
|
||||
/// Returns None if the network ID is not in the map.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkEntityMap;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # let mut world = World::new();
|
||||
/// # let entity = world.spawn_empty().id();
|
||||
/// let mut map = NetworkEntityMap::new();
|
||||
/// let network_id = Uuid::new_v4();
|
||||
///
|
||||
/// map.insert(network_id, entity);
|
||||
/// assert_eq!(map.get_entity(network_id), Some(entity));
|
||||
///
|
||||
/// let unknown_id = Uuid::new_v4();
|
||||
/// assert_eq!(map.get_entity(unknown_id), None);
|
||||
/// ```
|
||||
pub fn get_entity(&self, network_id: uuid::Uuid) -> Option<Entity> {
    // Entity is Copy, so `.copied()` turns Option<&Entity> into Option<Entity>.
    self.network_id_to_entity.get(&network_id).copied()
}
|
||||
|
||||
/// Get the network ID for a Bevy Entity
|
||||
///
|
||||
/// Returns None if the entity is not in the map.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkEntityMap;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # let mut world = World::new();
|
||||
/// # let entity = world.spawn_empty().id();
|
||||
/// let mut map = NetworkEntityMap::new();
|
||||
/// let network_id = Uuid::new_v4();
|
||||
///
|
||||
/// map.insert(network_id, entity);
|
||||
/// assert_eq!(map.get_network_id(entity), Some(network_id));
|
||||
///
|
||||
/// # let unknown_entity = world.spawn_empty().id();
|
||||
/// assert_eq!(map.get_network_id(unknown_entity), None);
|
||||
/// ```
|
||||
pub fn get_network_id(&self, entity: Entity) -> Option<uuid::Uuid> {
    // Uuid is Copy, so `.copied()` avoids cloning through the reference.
    self.entity_to_network_id.get(&entity).copied()
}
|
||||
|
||||
/// Remove a mapping by network ID
|
||||
///
|
||||
/// Returns the Entity that was mapped to this network ID, if any.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkEntityMap;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # let mut world = World::new();
|
||||
/// # let entity = world.spawn_empty().id();
|
||||
/// let mut map = NetworkEntityMap::new();
|
||||
/// let network_id = Uuid::new_v4();
|
||||
///
|
||||
/// map.insert(network_id, entity);
|
||||
/// assert_eq!(map.remove_by_network_id(network_id), Some(entity));
|
||||
/// assert_eq!(map.get_entity(network_id), None);
|
||||
/// ```
|
||||
pub fn remove_by_network_id(&mut self, network_id: uuid::Uuid) -> Option<Entity> {
    // Drop the forward entry first; `?` bails out with None if it was absent.
    let entity = self.network_id_to_entity.remove(&network_id)?;
    // Keep the reverse map in lockstep.
    self.entity_to_network_id.remove(&entity);
    Some(entity)
}
|
||||
|
||||
/// Remove a mapping by Entity
|
||||
///
|
||||
/// Returns the network ID that was mapped to this entity, if any.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkEntityMap;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # let mut world = World::new();
|
||||
/// # let entity = world.spawn_empty().id();
|
||||
/// let mut map = NetworkEntityMap::new();
|
||||
/// let network_id = Uuid::new_v4();
|
||||
///
|
||||
/// map.insert(network_id, entity);
|
||||
/// assert_eq!(map.remove_by_entity(entity), Some(network_id));
|
||||
/// assert_eq!(map.get_network_id(entity), None);
|
||||
/// ```
|
||||
pub fn remove_by_entity(&mut self, entity: Entity) -> Option<uuid::Uuid> {
|
||||
if let Some(network_id) = self.entity_to_network_id.remove(&entity) {
|
||||
self.network_id_to_entity.remove(&network_id);
|
||||
Some(network_id)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a network ID exists in the map
|
||||
pub fn contains_network_id(&self, network_id: uuid::Uuid) -> bool {
|
||||
self.network_id_to_entity.contains_key(&network_id)
|
||||
}
|
||||
|
||||
/// Check if an entity exists in the map
|
||||
pub fn contains_entity(&self, entity: Entity) -> bool {
|
||||
self.entity_to_network_id.contains_key(&entity)
|
||||
}
|
||||
|
||||
/// Get the number of mapped entities
///
/// The two internal maps are kept in lockstep by insert/remove, so the
/// forward map's length is the authoritative pair count.
pub fn len(&self) -> usize {
    self.network_id_to_entity.len()
}
|
||||
|
||||
/// Check if the map is empty
///
/// Equivalent to `self.len() == 0`.
pub fn is_empty(&self) -> bool {
    self.network_id_to_entity.is_empty()
}
|
||||
|
||||
/// Clear all mappings
|
||||
pub fn clear(&mut self) {
|
||||
self.network_id_to_entity.clear();
|
||||
self.entity_to_network_id.clear();
|
||||
}
|
||||
|
||||
/// Get an iterator over all (network_id, entity) pairs
///
/// Iteration order is unspecified — do not rely on it.
pub fn iter(&self) -> impl Iterator<Item = (&uuid::Uuid, &Entity)> {
    self.network_id_to_entity.iter()
}
|
||||
|
||||
/// Get all network IDs
///
/// Borrows from the forward map; order is unspecified.
pub fn network_ids(&self) -> impl Iterator<Item = &uuid::Uuid> {
    self.network_id_to_entity.keys()
}
|
||||
|
||||
/// Get all entities
///
/// Borrows from the reverse map; order is unspecified.
pub fn entities(&self) -> impl Iterator<Item = &Entity> {
    self.entity_to_network_id.keys()
}
|
||||
}
|
||||
|
||||
/// System to automatically register NetworkedEntity components in the map
|
||||
///
|
||||
/// This system runs in PostUpdate to catch newly spawned networked entities
|
||||
/// and add them to the NetworkEntityMap.
|
||||
///
|
||||
/// Add this to your app:
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::register_networked_entities_system;
|
||||
///
|
||||
/// App::new().add_systems(PostUpdate, register_networked_entities_system);
|
||||
/// ```
|
||||
pub fn register_networked_entities_system(
|
||||
mut map: ResMut<NetworkEntityMap>,
|
||||
query: Query<
|
||||
(Entity, &crate::networking::NetworkedEntity),
|
||||
Added<crate::networking::NetworkedEntity>,
|
||||
>,
|
||||
) {
|
||||
for (entity, networked) in query.iter() {
|
||||
map.insert(networked.network_id, entity);
|
||||
}
|
||||
}
|
||||
|
||||
/// System to automatically unregister despawned entities from the map
|
||||
///
|
||||
/// This system cleans up the NetworkEntityMap when networked entities are
|
||||
/// despawned.
|
||||
///
|
||||
/// Add this to your app:
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::cleanup_despawned_entities_system;
|
||||
///
|
||||
/// App::new().add_systems(PostUpdate, cleanup_despawned_entities_system);
|
||||
/// ```
|
||||
pub fn cleanup_despawned_entities_system(
|
||||
mut map: ResMut<NetworkEntityMap>,
|
||||
mut removed: RemovedComponents<crate::networking::NetworkedEntity>,
|
||||
) {
|
||||
for entity in removed.read() {
|
||||
map.remove_by_entity(entity);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for `NetworkEntityMap` bimap semantics.
    use super::*;

    // Round-trip: an inserted pair is visible through both lookup directions.
    #[test]
    fn test_insert_and_get() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);

        assert_eq!(map.get_entity(network_id), Some(entity));
        assert_eq!(map.get_network_id(entity), Some(network_id));
    }

    // Lookups on an empty map return None for both directions.
    #[test]
    fn test_get_nonexistent() {
        let map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // Removing by network ID clears BOTH directions of the bimap.
    #[test]
    fn test_remove_by_network_id() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.remove_by_network_id(network_id), Some(entity));
        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // Removing by entity is the symmetric operation and also clears both sides.
    #[test]
    fn test_remove_by_entity() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.remove_by_entity(entity), Some(network_id));
        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // contains_* answer false before insert and true after.
    #[test]
    fn test_contains() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        assert!(!map.contains_network_id(network_id));
        assert!(!map.contains_entity(entity));

        map.insert(network_id, entity);

        assert!(map.contains_network_id(network_id));
        assert!(map.contains_entity(entity));
    }

    // len()/is_empty() track the number of pairs as insertions happen.
    #[test]
    fn test_len_and_is_empty() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();

        assert!(map.is_empty());
        assert_eq!(map.len(), 0);

        let entity1 = world.spawn_empty().id();
        let id1 = uuid::Uuid::new_v4();
        map.insert(id1, entity1);

        assert!(!map.is_empty());
        assert_eq!(map.len(), 1);

        let entity2 = world.spawn_empty().id();
        let id2 = uuid::Uuid::new_v4();
        map.insert(id2, entity2);

        assert_eq!(map.len(), 2);
    }

    // clear() empties the map completely.
    #[test]
    fn test_clear() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.len(), 1);

        map.clear();
        assert!(map.is_empty());
    }

    // Re-inserting a known network ID replaces the old pair instead of
    // leaving a stale reverse entry behind.
    #[test]
    fn test_insert_overwrites_old_mapping() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity1 = world.spawn_empty().id();
        let entity2 = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        // Insert first mapping
        map.insert(network_id, entity1);
        assert_eq!(map.get_entity(network_id), Some(entity1));

        // Insert same network_id with different entity
        map.insert(network_id, entity2);
        assert_eq!(map.get_entity(network_id), Some(entity2));
        assert_eq!(map.get_network_id(entity1), None); // Old mapping removed
        assert_eq!(map.len(), 1); // Still only one mapping
    }

    // iter() yields exactly the inserted pairs (order unspecified).
    #[test]
    fn test_iter() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity1 = world.spawn_empty().id();
        let entity2 = world.spawn_empty().id();
        let id1 = uuid::Uuid::new_v4();
        let id2 = uuid::Uuid::new_v4();

        map.insert(id1, entity1);
        map.insert(id2, entity2);

        let mut count = 0;
        for (network_id, entity) in map.iter() {
            assert!(network_id == &id1 || network_id == &id2);
            assert!(entity == &entity1 || entity == &entity2);
            count += 1;
        }
        assert_eq!(count, 2);
    }
}
|
||||
77
crates/libmarathon/src/networking/error.rs
Normal file
77
crates/libmarathon/src/networking/error.rs
Normal file
@@ -0,0 +1,77 @@
|
||||
//! Error types for the networking layer
|
||||
|
||||
use std::fmt;
|
||||
|
||||
/// Result type for networking operations
|
||||
pub type Result<T> = std::result::Result<T, NetworkingError>;
|
||||
|
||||
/// Errors that can occur in the networking layer
///
/// NOTE(review): if this enum is part of the public crate API, consider
/// `#[non_exhaustive]` so variants can be added without breaking downstream
/// exhaustive matches — confirm no external crate matches on it first.
#[derive(Debug)]
pub enum NetworkingError {
    /// Serialization error
    Serialization(String),

    /// Deserialization error
    Deserialization(String),

    /// Gossip error (iroh-gossip)
    Gossip(String),

    /// Blob transfer error (iroh-blobs)
    Blob(String),

    /// Entity not found in network map
    EntityNotFound(uuid::Uuid),

    /// Vector clock comparison failed
    VectorClockError(String),

    /// CRDT merge conflict
    MergeConflict(String),

    /// Invalid message format
    InvalidMessage(String),

    /// Authentication/security error
    SecurityError(String),

    /// Rate limit exceeded
    RateLimitExceeded,

    /// Other networking errors
    Other(String),
}
|
||||
|
||||
impl fmt::Display for NetworkingError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
| NetworkingError::Serialization(msg) => write!(f, "Serialization error: {}", msg),
|
||||
| NetworkingError::Deserialization(msg) => {
|
||||
write!(f, "Deserialization error: {}", msg)
|
||||
},
|
||||
| NetworkingError::Gossip(msg) => write!(f, "Gossip error: {}", msg),
|
||||
| NetworkingError::Blob(msg) => write!(f, "Blob transfer error: {}", msg),
|
||||
| NetworkingError::EntityNotFound(id) => write!(f, "Entity not found: {}", id),
|
||||
| NetworkingError::VectorClockError(msg) => write!(f, "Vector clock error: {}", msg),
|
||||
| NetworkingError::MergeConflict(msg) => write!(f, "CRDT merge conflict: {}", msg),
|
||||
| NetworkingError::InvalidMessage(msg) => write!(f, "Invalid message: {}", msg),
|
||||
| NetworkingError::SecurityError(msg) => write!(f, "Security error: {}", msg),
|
||||
| NetworkingError::RateLimitExceeded => write!(f, "Rate limit exceeded"),
|
||||
| NetworkingError::Other(msg) => write!(f, "{}", msg),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for NetworkingError {}
|
||||
|
||||
impl From<bincode::Error> for NetworkingError {
|
||||
fn from(e: bincode::Error) -> Self {
|
||||
NetworkingError::Serialization(e.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrap persistence-layer failures as generic networking errors.
impl From<crate::persistence::PersistenceError> for NetworkingError {
    fn from(err: crate::persistence::PersistenceError) -> Self {
        Self::Other(format!("Persistence error: {}", err))
    }
}
|
||||
177
crates/libmarathon/src/networking/gossip_bridge.rs
Normal file
177
crates/libmarathon/src/networking/gossip_bridge.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
//! Async-to-sync bridge for iroh-gossip integration with Bevy
|
||||
//!
|
||||
//! This module provides the bridge between Bevy's synchronous ECS world and
|
||||
//! iroh-gossip's async runtime. It uses channels to pass messages between the
|
||||
//! async tokio tasks and Bevy systems.
|
||||
//!
|
||||
//! **NOTE:** This is a simplified implementation for Phase 3. Full gossip
|
||||
//! integration will be completed in later phases.
|
||||
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
sync::{
|
||||
Arc,
|
||||
Mutex,
|
||||
},
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
error::{
|
||||
NetworkingError,
|
||||
Result,
|
||||
},
|
||||
messages::VersionedMessage,
|
||||
vector_clock::NodeId,
|
||||
};
|
||||
|
||||
/// Bevy resource wrapping the gossip bridge
///
/// This resource provides the interface between Bevy systems and the async
/// gossip network. Systems can send messages via `send()` and poll for
/// incoming messages via `try_recv()`.
///
/// `Clone` is shallow: the queues are behind `Arc`s, so every clone of this
/// resource observes and mutates the same two queues.
///
/// NOTE(review): `node_id` is both a `pub` field and exposed via the
/// `node_id()` getter — consider picking one access path.
#[derive(Resource, Clone)]
pub struct GossipBridge {
    /// Queue for outgoing messages
    outgoing: Arc<Mutex<VecDeque<VersionedMessage>>>,

    /// Queue for incoming messages
    incoming: Arc<Mutex<VecDeque<VersionedMessage>>>,

    /// Our node ID
    pub node_id: NodeId,
}
|
||||
|
||||
impl GossipBridge {
|
||||
/// Create a new gossip bridge
|
||||
pub fn new(node_id: NodeId) -> Self {
|
||||
Self {
|
||||
outgoing: Arc::new(Mutex::new(VecDeque::new())),
|
||||
incoming: Arc::new(Mutex::new(VecDeque::new())),
|
||||
node_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a message to the gossip network
|
||||
pub fn send(&self, message: VersionedMessage) -> Result<()> {
|
||||
self.outgoing
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Gossip(format!("Failed to lock outgoing queue: {}", e)))?
|
||||
.push_back(message);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Try to receive a message from the gossip network (from incoming queue)
|
||||
pub fn try_recv(&self) -> Option<VersionedMessage> {
|
||||
self.incoming.lock().ok()?.pop_front()
|
||||
}
|
||||
|
||||
/// Drain all pending messages from the incoming queue atomically
|
||||
///
|
||||
/// This acquires the lock once and drains all messages, preventing race
|
||||
/// conditions where messages could arrive between individual try_recv()
|
||||
/// calls.
|
||||
pub fn drain_incoming(&self) -> Vec<VersionedMessage> {
|
||||
self.incoming
|
||||
.lock()
|
||||
.ok()
|
||||
.map(|mut queue| queue.drain(..).collect())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Try to get a message from the outgoing queue to send to gossip
|
||||
pub fn try_recv_outgoing(&self) -> Option<VersionedMessage> {
|
||||
self.outgoing.lock().ok()?.pop_front()
|
||||
}
|
||||
|
||||
/// Push a message to the incoming queue (for testing/integration)
|
||||
pub fn push_incoming(&self, message: VersionedMessage) -> Result<()> {
|
||||
self.incoming
|
||||
.lock()
|
||||
.map_err(|e| NetworkingError::Gossip(format!("Failed to lock incoming queue: {}", e)))?
|
||||
.push_back(message);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get our node ID
|
||||
pub fn node_id(&self) -> NodeId {
|
||||
self.node_id
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize the gossip bridge
///
/// Thin constructor wrapper that also logs the node ID for diagnostics.
pub fn init_gossip_bridge(node_id: NodeId) -> GossipBridge {
    info!("Initializing gossip bridge for node: {}", node_id);
    GossipBridge::new(node_id)
}
|
||||
|
||||
/// Bevy system to broadcast outgoing messages
///
/// Intentionally empty placeholder: broadcasting requires delta generation,
/// which lands in a later phase.
pub fn broadcast_messages_system(/* will be implemented in later phases */) {
    // This will be populated when we have delta generation
}
|
||||
|
||||
/// Bevy system to receive incoming messages
|
||||
///
|
||||
/// **Note:** This is deprecated in favor of `receive_and_apply_deltas_system`
|
||||
/// which provides full CRDT merge semantics. This stub remains for backward
|
||||
/// compatibility.
|
||||
pub fn receive_messages_system(bridge: Option<Res<GossipBridge>>) {
|
||||
let Some(bridge) = bridge else {
|
||||
return;
|
||||
};
|
||||
|
||||
// Poll for incoming messages
|
||||
while let Some(message) = bridge.try_recv() {
|
||||
// For now, just log the message
|
||||
debug!("Received message: {:?}", message.message);
|
||||
|
||||
// Use receive_and_apply_deltas_system for full functionality
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit tests for the `GossipBridge` queue wrapper.
    use super::*;

    // The bridge stores and exposes the node ID it was built with.
    #[test]
    fn test_gossip_bridge_creation() {
        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);

        assert_eq!(bridge.node_id(), node_id);
    }

    // send() accepts a well-formed VersionedMessage without error.
    #[test]
    fn test_send_message() {
        use crate::networking::{
            JoinType,
            SessionId,
            SyncMessage,
        };

        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);
        let session_id = SessionId::new();

        let message = SyncMessage::JoinRequest {
            node_id,
            session_id,
            session_secret: None,
            last_known_clock: None,
            join_type: JoinType::Fresh,
        };
        let versioned = VersionedMessage::new(message);

        let result = bridge.send(versioned);
        assert!(result.is_ok());
    }

    // try_recv() on a fresh bridge yields nothing.
    #[test]
    fn test_try_recv_empty() {
        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);

        assert!(bridge.try_recv().is_none());
    }
}
|
||||
610
crates/libmarathon/src/networking/join_protocol.rs
Normal file
610
crates/libmarathon/src/networking/join_protocol.rs
Normal file
@@ -0,0 +1,610 @@
|
||||
//! Join protocol for new peer onboarding
|
||||
//!
|
||||
//! This module handles the protocol for new peers to join an existing session
|
||||
//! and receive the full world state. The join flow:
|
||||
//!
|
||||
//! 1. New peer sends JoinRequest with node ID and optional session secret
|
||||
//! 2. Existing peer validates request and responds with FullState
|
||||
//! 3. New peer applies FullState to initialize local world
|
||||
//! 4. New peer begins participating in delta synchronization
|
||||
//!
|
||||
//! **NOTE:** This is a simplified implementation for Phase 7. Full security
|
||||
//! and session management will be enhanced in Phase 13.
|
||||
|
||||
use bevy::{
|
||||
prelude::*,
|
||||
reflect::TypeRegistry,
|
||||
};
|
||||
|
||||
use crate::networking::{
|
||||
GossipBridge,
|
||||
NetworkedEntity,
|
||||
SessionId,
|
||||
VectorClock,
|
||||
blob_support::BlobStore,
|
||||
delta_generation::NodeVectorClock,
|
||||
entity_map::NetworkEntityMap,
|
||||
messages::{
|
||||
EntityState,
|
||||
JoinType,
|
||||
SyncMessage,
|
||||
VersionedMessage,
|
||||
},
|
||||
};
|
||||
|
||||
/// Build a JoinRequest message
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `node_id` - The UUID of the node requesting to join
|
||||
/// * `session_id` - The session to join
|
||||
/// * `session_secret` - Optional pre-shared secret for authentication
|
||||
/// * `last_known_clock` - Optional vector clock from previous session (for rejoin)
|
||||
/// * `join_type` - Whether this is a fresh join or rejoin
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{build_join_request, SessionId, JoinType};
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let session_id = SessionId::new();
|
||||
/// let request = build_join_request(node_id, session_id, None, None, JoinType::Fresh);
|
||||
/// ```
|
||||
pub fn build_join_request(
|
||||
node_id: uuid::Uuid,
|
||||
session_id: SessionId,
|
||||
session_secret: Option<Vec<u8>>,
|
||||
last_known_clock: Option<VectorClock>,
|
||||
join_type: JoinType,
|
||||
) -> VersionedMessage {
|
||||
VersionedMessage::new(SyncMessage::JoinRequest {
|
||||
node_id,
|
||||
session_id,
|
||||
session_secret,
|
||||
last_known_clock,
|
||||
join_type,
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a FullState message containing all networked entities
///
/// This serializes the entire world state for a new peer. Large worlds may
/// take significant bandwidth - Phase 14 will add compression.
///
/// # Parameters
///
/// - `world`: Bevy world containing entities
/// - `query`: Query for all NetworkedEntity components
/// - `type_registry`: Type registry for serialization
/// - `node_clock`: Current node vector clock
/// - `blob_store`: Optional blob store for large components
///
/// # Returns
///
/// A FullState message ready to send to the joining peer
///
/// NOTE(review): serialization and blob-creation failures are skipped
/// silently (component simply omitted from the snapshot) — confirm this
/// best-effort behavior is intended, or add at least a warn! log.
///
/// NOTE(review): every EntityState is stamped with the full node clock
/// (`node_clock.clock`), not a per-entity clock — confirm intended.
pub fn build_full_state(
    world: &World,
    networked_entities: &Query<(Entity, &NetworkedEntity)>,
    type_registry: &TypeRegistry,
    node_clock: &NodeVectorClock,
    blob_store: Option<&BlobStore>,
) -> VersionedMessage {
    use crate::{
        networking::{
            blob_support::create_component_data,
            messages::ComponentState,
        },
        persistence::reflection::serialize_component,
    };

    let mut entities = Vec::new();

    for (entity, networked) in networked_entities.iter() {
        let entity_ref = world.entity(entity);
        let mut components = Vec::new();

        // Iterate over all type registrations to find components
        // (registry scan per entity — O(entities × registered types))
        for registration in type_registry.iter() {
            // Skip if no ReflectComponent data
            let Some(reflect_component) = registration.data::<ReflectComponent>() else {
                continue;
            };

            let type_path = registration.type_info().type_path();

            // Skip networked wrapper components — these are reconstructed on
            // the receiving side, not serialized as payload.
            if type_path.ends_with("::NetworkedEntity") ||
                type_path.ends_with("::NetworkedTransform") ||
                type_path.ends_with("::NetworkedSelection") ||
                type_path.ends_with("::NetworkedDrawingPath")
            {
                continue;
            }

            // Try to reflect this component from the entity
            if let Some(reflected) = reflect_component.reflect(entity_ref) {
                // Serialize the component (failures silently skip it)
                if let Ok(serialized) = serialize_component(reflected, type_registry) {
                    // Create component data (inline or blob). With a blob
                    // store, create_component_data decides inline vs. blob;
                    // blob-store errors silently skip the component.
                    let data = if let Some(store) = blob_store {
                        match create_component_data(serialized, store) {
                            | Ok(d) => d,
                            | Err(_) => continue,
                        }
                    } else {
                        crate::networking::ComponentData::Inline(serialized)
                    };

                    components.push(ComponentState {
                        component_type: type_path.to_string(),
                        data,
                    });
                }
            }
        }

        // FullState never carries tombstones from this path: is_deleted is
        // always false for live entities.
        entities.push(EntityState {
            entity_id: networked.network_id,
            owner_node_id: networked.owner_node_id,
            vector_clock: node_clock.clock.clone(),
            components,
            is_deleted: false,
        });
    }

    info!(
        "Built FullState with {} entities for new peer",
        entities.len()
    );

    VersionedMessage::new(SyncMessage::FullState {
        entities,
        vector_clock: node_clock.clock.clone(),
    })
}
|
||||
|
||||
/// Apply a FullState message to the local world
///
/// This initializes the world for a newly joined peer by spawning all entities
/// and applying their component state.
///
/// # Parameters
///
/// - `entities`: List of entity states from FullState message
/// - `remote_clock`: Vector clock from FullState (merged into ours)
/// - `commands`: Bevy commands for spawning entities
/// - `entity_map`: Entity map to populate
/// - `type_registry`: Type registry for deserialization
/// - `node_clock`: Our node's vector clock to update
/// - `blob_store`: Optional blob store for resolving blob references
/// - `tombstone_registry`: Optional tombstone registry for deletion tracking
///
/// Per-component failures (blob fetch, deserialize, unknown type, missing
/// ReflectComponent) are logged and skipped; the entity is still spawned
/// with whatever components succeeded.
///
/// NOTE(review): the deferred `commands.queue` closure re-fetches
/// `AppTypeRegistry` from the world instead of using the `type_registry`
/// parameter — presumably because the closure must be `'static`; confirm
/// both registries are the same instance.
pub fn apply_full_state(
    entities: Vec<EntityState>,
    remote_clock: crate::networking::VectorClock,
    commands: &mut Commands,
    entity_map: &mut NetworkEntityMap,
    type_registry: &TypeRegistry,
    node_clock: &mut NodeVectorClock,
    blob_store: Option<&BlobStore>,
    mut tombstone_registry: Option<&mut crate::networking::TombstoneRegistry>,
) {
    use crate::{
        networking::blob_support::get_component_data,
        persistence::reflection::deserialize_component,
    };

    info!("Applying FullState with {} entities", entities.len());

    // Merge the remote vector clock so our causal history includes
    // everything the snapshot reflects.
    node_clock.clock.merge(&remote_clock);

    // Spawn all entities and apply their state
    for entity_state in entities {
        // Handle deleted entities (tombstones): record the deletion and do
        // NOT spawn anything for them.
        if entity_state.is_deleted {
            // Record tombstone
            if let Some(ref mut registry) = tombstone_registry {
                registry.record_deletion(
                    entity_state.entity_id,
                    entity_state.owner_node_id,
                    entity_state.vector_clock.clone(),
                );
            }
            continue;
        }

        // Spawn entity with NetworkedEntity and Persisted components
        // This ensures entities received via FullState are persisted locally
        let entity = commands
            .spawn((
                NetworkedEntity::with_id(entity_state.entity_id, entity_state.owner_node_id),
                crate::persistence::Persisted::with_id(entity_state.entity_id),
            ))
            .id();

        // Register in entity map
        entity_map.insert(entity_state.entity_id, entity);

        let num_components = entity_state.components.len();

        // Apply all components
        for component_state in &entity_state.components {
            // Get the actual data (resolve blob if needed)
            let data_bytes = match &component_state.data {
                | crate::networking::ComponentData::Inline(bytes) => bytes.clone(),
                | blob_ref @ crate::networking::ComponentData::BlobRef { .. } => {
                    if let Some(store) = blob_store {
                        match get_component_data(blob_ref, store) {
                            | Ok(bytes) => bytes,
                            | Err(e) => {
                                error!(
                                    "Failed to retrieve blob for {}: {}",
                                    component_state.component_type, e
                                );
                                continue;
                            },
                        }
                    } else {
                        // Snapshot references a blob but we have no store —
                        // component is dropped.
                        error!(
                            "Blob reference for {} but no blob store available",
                            component_state.component_type
                        );
                        continue;
                    }
                },
            };

            // Deserialize the component
            let reflected = match deserialize_component(&data_bytes, type_registry) {
                | Ok(r) => r,
                | Err(e) => {
                    error!(
                        "Failed to deserialize {}: {}",
                        component_state.component_type, e
                    );
                    continue;
                },
            };

            // Get the type registration
            let registration =
                match type_registry.get_with_type_path(&component_state.component_type) {
                    | Some(reg) => reg,
                    | None => {
                        error!(
                            "Component type {} not registered",
                            component_state.component_type
                        );
                        continue;
                    },
                };

            // Get ReflectComponent data (cloned so the deferred closure
            // below can own it)
            let reflect_component = match registration.data::<ReflectComponent>() {
                | Some(rc) => rc.clone(),
                | None => {
                    error!(
                        "Component type {} does not have ReflectComponent data",
                        component_state.component_type
                    );
                    continue;
                },
            };

            // Insert the component via a deferred command — component
            // insertion needs exclusive world access, so it runs at the
            // next command-flush point, not here.
            let component_type_owned = component_state.component_type.clone();
            commands.queue(move |world: &mut World| {
                let type_registry_arc = {
                    let Some(type_registry_res) = world.get_resource::<AppTypeRegistry>() else {
                        error!("AppTypeRegistry not found in world");
                        return;
                    };
                    type_registry_res.clone()
                };

                let type_registry = type_registry_arc.read();

                // Entity may have been despawned before the command ran;
                // in that case the insert is silently skipped.
                if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
                    reflect_component.insert(&mut entity_mut, &*reflected, &type_registry);
                    debug!("Applied component {} from FullState", component_type_owned);
                }
            });
        }

        debug!(
            "Spawned entity {:?} from FullState with {} components",
            entity_state.entity_id, num_components
        );
    }

    info!("FullState applied successfully");
}
|
||||
|
||||
/// System to handle JoinRequest messages
///
/// When we receive a JoinRequest, build and send a FullState response.
///
/// Validation rules (from the code below):
/// - If a `SessionSecret` resource is configured, every request MUST carry a
///   matching secret; missing or invalid secrets are rejected.
/// - If no secret is configured, requests are accepted regardless of whether
///   they carry one.
///
/// NOTE(review): `bridge.try_recv()` pops messages destructively, and the
/// catch-all arm below discards anything that is not a JoinRequest — if
/// other systems also poll this bridge, their messages can be eaten here.
/// Confirm the intended single-consumer/dispatch design.
///
/// Add this to your app:
///
/// ```no_run
/// use bevy::prelude::*;
/// use libmarathon::networking::handle_join_requests_system;
///
/// App::new().add_systems(Update, handle_join_requests_system);
/// ```
pub fn handle_join_requests_system(
    world: &World,
    bridge: Option<Res<GossipBridge>>,
    networked_entities: Query<(Entity, &NetworkedEntity)>,
    type_registry: Res<AppTypeRegistry>,
    node_clock: Res<NodeVectorClock>,
    blob_store: Option<Res<BlobStore>>,
) {
    // No bridge resource means networking isn't initialized — nothing to do.
    let Some(bridge) = bridge else {
        return;
    };

    let registry = type_registry.read();
    let blob_store_ref = blob_store.as_deref();

    // Poll for incoming JoinRequest messages
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::JoinRequest {
                node_id,
                session_id,
                session_secret,
                last_known_clock: _,
                join_type,
            } => {
                info!(
                    "Received JoinRequest from node {} for session {} (type: {:?})",
                    node_id, session_id, join_type
                );

                // Validate session secret if configured
                if let Some(expected) =
                    world.get_resource::<crate::networking::plugin::SessionSecret>()
                {
                    match &session_secret {
                        | Some(provided_secret) => {
                            if let Err(e) = crate::networking::validate_session_secret(
                                provided_secret,
                                expected.as_bytes(),
                            ) {
                                error!("JoinRequest from {} rejected: {}", node_id, e);
                                continue; // Skip this request, don't send FullState
                            }
                            info!("Session secret validated for node {}", node_id);
                        },
                        | None => {
                            warn!(
                                "JoinRequest from {} missing required session secret, rejecting",
                                node_id
                            );
                            continue; // Reject requests without secret when one is configured
                        },
                    }
                } else if session_secret.is_some() {
                    // No session secret configured but peer provided one
                    debug!("Session secret provided but none configured, accepting");
                }

                // Build full state snapshot of every networked entity
                let full_state = build_full_state(
                    world,
                    &networked_entities,
                    &registry,
                    &node_clock,
                    blob_store_ref,
                );

                // Send full state to joining peer
                // NOTE(review): this goes through the shared outgoing queue,
                // not a direct channel to the requester — presumably the
                // gossip layer routes it; confirm.
                if let Err(e) = bridge.send(full_state) {
                    error!("Failed to send FullState: {}", e);
                } else {
                    info!("Sent FullState to node {}", node_id);
                }
            },
            | _ => {
                // Not a JoinRequest, ignore (other systems handle other
                // messages)
            },
        }
    }
}
|
||||
|
||||
/// System to handle FullState messages
|
||||
///
|
||||
/// When we receive a FullState (after sending JoinRequest), apply it to our
|
||||
/// world.
|
||||
///
|
||||
/// This system should run BEFORE receive_and_apply_deltas_system to ensure
|
||||
/// we're fully initialized before processing deltas.
|
||||
pub fn handle_full_state_system(
|
||||
mut commands: Commands,
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
mut entity_map: ResMut<NetworkEntityMap>,
|
||||
type_registry: Res<AppTypeRegistry>,
|
||||
mut node_clock: ResMut<NodeVectorClock>,
|
||||
blob_store: Option<Res<BlobStore>>,
|
||||
mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
|
||||
) {
|
||||
let Some(bridge) = bridge else {
|
||||
return;
|
||||
};
|
||||
|
||||
let registry = type_registry.read();
|
||||
let blob_store_ref = blob_store.as_deref();
|
||||
|
||||
// Poll for FullState messages
|
||||
while let Some(message) = bridge.try_recv() {
|
||||
match message.message {
|
||||
| SyncMessage::FullState {
|
||||
entities,
|
||||
vector_clock,
|
||||
} => {
|
||||
info!("Received FullState with {} entities", entities.len());
|
||||
|
||||
apply_full_state(
|
||||
entities,
|
||||
vector_clock,
|
||||
&mut commands,
|
||||
&mut entity_map,
|
||||
®istry,
|
||||
&mut node_clock,
|
||||
blob_store_ref,
|
||||
tombstone_registry.as_deref_mut(),
|
||||
);
|
||||
},
|
||||
| _ => {
|
||||
// Not a FullState, ignore
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for JoinRequest construction and FullState application.
    use super::*;
    use crate::networking::VectorClock;

    // A fresh join carries no secret and no prior clock.
    #[test]
    fn test_build_join_request() {
        let node_id = uuid::Uuid::new_v4();
        let session_id = SessionId::new();
        let request = build_join_request(node_id, session_id.clone(), None, None, JoinType::Fresh);

        match request.message {
            | SyncMessage::JoinRequest {
                node_id: req_node_id,
                session_id: req_session_id,
                session_secret,
                last_known_clock,
                join_type,
            } => {
                assert_eq!(req_node_id, node_id);
                assert_eq!(req_session_id, session_id);
                assert!(session_secret.is_none());
                assert!(last_known_clock.is_none());
                assert!(matches!(join_type, JoinType::Fresh));
            },
            | _ => panic!("Expected JoinRequest"),
        }
    }

    // A provided session secret must round-trip into the message unchanged.
    #[test]
    fn test_build_join_request_with_secret() {
        let node_id = uuid::Uuid::new_v4();
        let session_id = SessionId::new();
        let secret = vec![1, 2, 3, 4];
        let request = build_join_request(
            node_id,
            session_id.clone(),
            Some(secret.clone()),
            None,
            JoinType::Fresh,
        );

        match request.message {
            | SyncMessage::JoinRequest {
                node_id: _,
                session_id: req_session_id,
                session_secret,
                last_known_clock,
                join_type,
            } => {
                assert_eq!(req_session_id, session_id);
                assert_eq!(session_secret, Some(secret));
                assert!(last_known_clock.is_none());
                assert!(matches!(join_type, JoinType::Fresh));
            },
            | _ => panic!("Expected JoinRequest"),
        }
    }

    // A rejoin carries the last known vector clock so the peer can send a
    // delta-friendly snapshot.
    #[test]
    fn test_build_join_request_rejoin() {
        let node_id = uuid::Uuid::new_v4();
        let session_id = SessionId::new();
        let clock = VectorClock::new();
        let join_type = JoinType::Rejoin {
            last_active: 1234567890,
            entity_count: 42,
        };

        let request = build_join_request(
            node_id,
            session_id.clone(),
            None,
            Some(clock.clone()),
            join_type.clone(),
        );

        match request.message {
            | SyncMessage::JoinRequest {
                node_id: req_node_id,
                session_id: req_session_id,
                session_secret,
                last_known_clock,
                join_type: req_join_type,
            } => {
                assert_eq!(req_node_id, node_id);
                assert_eq!(req_session_id, session_id);
                assert!(session_secret.is_none());
                assert_eq!(last_known_clock, Some(clock));
                assert!(matches!(req_join_type, JoinType::Rejoin { .. }));
            },
            | _ => panic!("Expected JoinRequest"),
        }
    }

    // Sanity-check the EntityState struct: fields are stored as given.
    #[test]
    fn test_entity_state_structure() {
        let entity_id = uuid::Uuid::new_v4();
        let owner_node_id = uuid::Uuid::new_v4();

        let state = EntityState {
            entity_id,
            owner_node_id,
            vector_clock: VectorClock::new(),
            components: vec![],
            is_deleted: false,
        };

        assert_eq!(state.entity_id, entity_id);
        assert_eq!(state.owner_node_id, owner_node_id);
        assert_eq!(state.components.len(), 0);
        assert!(!state.is_deleted);
    }

    // Applying an empty FullState must still merge the remote vector clock
    // into ours, even though no entities are spawned.
    #[test]
    fn test_apply_full_state_empty() {
        let node_id = uuid::Uuid::new_v4();
        let mut node_clock = NodeVectorClock::new(node_id);
        let remote_clock = VectorClock::new();

        // Create minimal setup for testing
        let mut entity_map = NetworkEntityMap::new();
        let type_registry = TypeRegistry::new();

        // Need a minimal Bevy app for Commands
        let mut app = App::new();
        let mut commands = app.world_mut().commands();

        apply_full_state(
            vec![],
            remote_clock.clone(),
            &mut commands,
            &mut entity_map,
            &type_registry,
            &mut node_clock,
            None,
            None, // tombstone_registry
        );

        // Should have merged clocks
        assert_eq!(node_clock.clock, remote_clock);
    }
}
|
||||
730
crates/libmarathon/src/networking/locks.rs
Normal file
730
crates/libmarathon/src/networking/locks.rs
Normal file
@@ -0,0 +1,730 @@
|
||||
//! Entity lock system for collaborative editing
|
||||
//!
|
||||
//! Provides optimistic entity locking to prevent concurrent modifications.
|
||||
//! Locks are acquired when entities are selected and released when deselected.
|
||||
//!
|
||||
//! # Lock Protocol
|
||||
//!
|
||||
//! 1. **Acquisition**: User selects entity → broadcast `LockRequest`
|
||||
//! 2. **Optimistic Apply**: All peers apply lock locally
|
||||
//! 3. **Confirm**: Holder broadcasts `LockAcquired`
|
||||
//! 4. **Conflict Resolution**: If two nodes acquire simultaneously, higher node ID wins
|
||||
//! 5. **Release**: User deselects entity → broadcast `LockReleased`
|
||||
//! 6. **Timeout**: 5-second timeout as crash recovery fallback
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use bevy::prelude::*;
|
||||
//! use libmarathon::networking::{EntityLockRegistry, acquire_entity_lock, release_entity_lock};
|
||||
//! use uuid::Uuid;
|
||||
//!
|
||||
//! fn my_system(world: &mut World) {
|
||||
//! let entity_id = Uuid::new_v4();
|
||||
//! let node_id = Uuid::new_v4();
|
||||
//!
|
||||
//! let mut registry = world.resource_mut::<EntityLockRegistry>();
|
||||
//!
|
||||
//! // Acquire lock when user selects entity
|
||||
//! registry.try_acquire(entity_id, node_id);
|
||||
//!
|
||||
//! // Release lock when user deselects entity
|
||||
//! registry.release(entity_id, node_id);
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
time::{
|
||||
Duration,
|
||||
Instant,
|
||||
},
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::networking::{
|
||||
GossipBridge,
|
||||
NetworkedSelection,
|
||||
NodeId,
|
||||
VersionedMessage,
|
||||
delta_generation::NodeVectorClock,
|
||||
messages::SyncMessage,
|
||||
};
|
||||
|
||||
/// Duration before a lock automatically expires (crash recovery)
///
/// If no heartbeat is seen for this long, remote peers may treat the lock
/// as abandoned and reclaim it.
pub const LOCK_TIMEOUT: Duration = Duration::from_secs(5);

/// Maximum number of concurrent locks per node (rate limiting)
///
/// Guards the registry against a single misbehaving peer acquiring an
/// unbounded number of locks.
pub const MAX_LOCKS_PER_NODE: usize = 100;
|
||||
|
||||
/// Lock acquisition/release messages
///
/// Wire protocol for the optimistic entity-lock scheme described in the
/// module docs; serialized with serde for transport over the gossip bridge.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum LockMessage {
    /// Request to acquire a lock on an entity
    LockRequest {
        entity_id: Uuid,
        node_id: NodeId,
    },

    /// Confirmation that a lock was successfully acquired
    LockAcquired {
        entity_id: Uuid,
        holder: NodeId,
    },

    /// Lock acquisition failed (already locked by another node)
    LockRejected {
        entity_id: Uuid,
        requester: NodeId,
        current_holder: NodeId,
    },

    /// Heartbeat to renew a held lock (sent ~1/sec by holder)
    ///
    /// If no heartbeat is received for 5 seconds, the lock expires.
    /// This provides automatic crash recovery without explicit timeouts.
    LockHeartbeat {
        entity_id: Uuid,
        holder: NodeId,
    },

    /// Request to release a lock
    LockRelease {
        entity_id: Uuid,
        node_id: NodeId,
    },

    /// Confirmation that a lock was released
    ///
    /// Also broadcast when an expired remote lock is cleaned up, so peer
    /// registries converge.
    LockReleased {
        entity_id: Uuid,
    },
}
|
||||
|
||||
/// Information about an active entity lock
///
/// A lock stays alive as long as heartbeats keep arriving; see
/// `is_expired` for the expiry rule.
#[derive(Debug, Clone)]
pub struct EntityLock {
    /// ID of the entity being locked
    pub entity_id: Uuid,

    /// Node that holds the lock
    pub holder: NodeId,

    /// When the last heartbeat was received (or when lock was acquired)
    pub last_heartbeat: Instant,

    /// Lock timeout duration (expires if no heartbeat for this long)
    // Defaults to LOCK_TIMEOUT; only tests override it.
    pub timeout: Duration,
}
|
||||
|
||||
impl EntityLock {
|
||||
/// Create a new entity lock
|
||||
pub fn new(entity_id: Uuid, holder: NodeId) -> Self {
|
||||
Self {
|
||||
entity_id,
|
||||
holder,
|
||||
last_heartbeat: Instant::now(),
|
||||
timeout: LOCK_TIMEOUT,
|
||||
}
|
||||
}
|
||||
|
||||
/// Renew the lock with a heartbeat
|
||||
pub fn renew(&mut self) {
|
||||
self.last_heartbeat = Instant::now();
|
||||
}
|
||||
|
||||
/// Check if the lock has expired (no heartbeat for > timeout)
|
||||
pub fn is_expired(&self) -> bool {
|
||||
self.last_heartbeat.elapsed() >= self.timeout
|
||||
}
|
||||
|
||||
/// Check if this lock is held by the given node
|
||||
pub fn is_held_by(&self, node_id: NodeId) -> bool {
|
||||
self.holder == node_id
|
||||
}
|
||||
}
|
||||
|
||||
/// Registry of all active entity locks
///
/// This resource tracks which entities are locked and by whom.
/// It's used to prevent concurrent modifications to the same entity.
#[derive(Resource, Default)]
pub struct EntityLockRegistry {
    /// Map of entity ID to lock info
    locks: HashMap<Uuid, EntityLock>,

    /// Count of locks held by each node (for rate limiting)
    // Invariant: for every node, this equals the number of entries in
    // `locks` whose holder is that node; entries with count 0 are removed.
    locks_per_node: HashMap<NodeId, usize>,
}
|
||||
|
||||
impl EntityLockRegistry {
|
||||
/// Create a new empty lock registry
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
locks: HashMap::new(),
|
||||
locks_per_node: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to acquire a lock on an entity
|
||||
///
|
||||
/// Returns Ok(()) if lock was acquired, Err with current holder if already locked.
|
||||
pub fn try_acquire(&mut self, entity_id: Uuid, node_id: NodeId) -> Result<(), NodeId> {
|
||||
// Check if already locked
|
||||
if let Some(existing_lock) = self.locks.get(&entity_id) {
|
||||
// If expired, allow re-acquisition
|
||||
if !existing_lock.is_expired() {
|
||||
return Err(existing_lock.holder);
|
||||
}
|
||||
|
||||
// Remove expired lock
|
||||
self.remove_lock(entity_id);
|
||||
}
|
||||
|
||||
// Check rate limit
|
||||
let node_lock_count = self.locks_per_node.get(&node_id).copied().unwrap_or(0);
|
||||
if node_lock_count >= MAX_LOCKS_PER_NODE {
|
||||
warn!(
|
||||
"Node {} at lock limit ({}/{}), rejecting acquisition",
|
||||
node_id, node_lock_count, MAX_LOCKS_PER_NODE
|
||||
);
|
||||
return Err(node_id); // Return self as "holder" to indicate rate limit
|
||||
}
|
||||
|
||||
// Acquire the lock
|
||||
let lock = EntityLock::new(entity_id, node_id);
|
||||
self.locks.insert(entity_id, lock);
|
||||
|
||||
// Update node lock count
|
||||
*self.locks_per_node.entry(node_id).or_insert(0) += 1;
|
||||
|
||||
debug!("Lock acquired: entity {} by node {}", entity_id, node_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Release a lock on an entity
|
||||
///
|
||||
/// Only succeeds if the node currently holds the lock.
|
||||
pub fn release(&mut self, entity_id: Uuid, node_id: NodeId) -> bool {
|
||||
if let Some(lock) = self.locks.get(&entity_id) {
|
||||
if lock.holder == node_id {
|
||||
self.remove_lock(entity_id);
|
||||
debug!("Lock released: entity {} by node {}", entity_id, node_id);
|
||||
return true;
|
||||
} else {
|
||||
warn!(
|
||||
"Node {} tried to release lock held by node {}",
|
||||
node_id, lock.holder
|
||||
);
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Force release a lock (for timeout cleanup)
|
||||
pub fn force_release(&mut self, entity_id: Uuid) {
|
||||
if self.locks.remove(&entity_id).is_some() {
|
||||
debug!("Lock force-released: entity {}", entity_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an entity is locked by any node
|
||||
///
|
||||
/// Takes the local node ID to properly handle expiration:
|
||||
/// - Our own locks are never considered expired (held exactly as long as selected)
|
||||
/// - Remote locks are subject to the 5-second timeout
|
||||
pub fn is_locked(&self, entity_id: Uuid, local_node_id: NodeId) -> bool {
|
||||
self.locks.get(&entity_id).map_or(false, |lock| {
|
||||
// Our own locks never expire
|
||||
lock.holder == local_node_id || !lock.is_expired()
|
||||
})
|
||||
}
|
||||
|
||||
/// Check if an entity is locked by a specific node
|
||||
///
|
||||
/// Takes the local node ID to properly handle expiration:
|
||||
/// - If checking our own lock, ignore expiration (held exactly as long as selected)
|
||||
/// - If checking another node's lock, apply 5-second timeout
|
||||
pub fn is_locked_by(&self, entity_id: Uuid, node_id: NodeId, local_node_id: NodeId) -> bool {
|
||||
self.locks.get(&entity_id).map_or(false, |lock| {
|
||||
if lock.holder != node_id {
|
||||
// Not held by the queried node
|
||||
false
|
||||
} else if lock.holder == local_node_id {
|
||||
// Checking our own lock - never expires
|
||||
true
|
||||
} else {
|
||||
// Checking remote lock - check expiration
|
||||
!lock.is_expired()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the holder of a lock (if locked and not expired)
|
||||
///
|
||||
/// Takes the local node ID to properly handle expiration:
|
||||
/// - Our own locks are never considered expired
|
||||
/// - Remote locks are subject to the 5-second timeout
|
||||
pub fn get_holder(&self, entity_id: Uuid, local_node_id: NodeId) -> Option<NodeId> {
|
||||
self.locks.get(&entity_id).and_then(|lock| {
|
||||
// Our own locks never expire
|
||||
if lock.holder == local_node_id || !lock.is_expired() {
|
||||
Some(lock.holder)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Renew a lock's heartbeat
|
||||
///
|
||||
/// Returns true if the heartbeat was renewed, false if lock doesn't exist
|
||||
/// or is held by a different node.
|
||||
pub fn renew_heartbeat(&mut self, entity_id: Uuid, node_id: NodeId) -> bool {
|
||||
if let Some(lock) = self.locks.get_mut(&entity_id) {
|
||||
if lock.holder == node_id {
|
||||
lock.renew();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Get all expired locks
|
||||
pub fn get_expired_locks(&self) -> Vec<Uuid> {
|
||||
self.locks
|
||||
.iter()
|
||||
.filter(|(_, lock)| lock.is_expired())
|
||||
.map(|(entity_id, _)| *entity_id)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get number of locks held by a node
|
||||
pub fn get_node_lock_count(&self, node_id: NodeId) -> usize {
|
||||
self.locks_per_node.get(&node_id).copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Get total number of active locks
|
||||
pub fn total_locks(&self) -> usize {
|
||||
self.locks.len()
|
||||
}
|
||||
|
||||
/// Remove a lock and update bookkeeping
|
||||
fn remove_lock(&mut self, entity_id: Uuid) {
|
||||
if let Some(lock) = self.locks.remove(&entity_id) {
|
||||
// Decrement node lock count
|
||||
if let Some(count) = self.locks_per_node.get_mut(&lock.holder) {
|
||||
*count = count.saturating_sub(1);
|
||||
if *count == 0 {
|
||||
self.locks_per_node.remove(&lock.holder);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Test helper: Manually expire a lock by setting its heartbeat timestamp to the past
|
||||
///
|
||||
/// This is only intended for testing purposes to simulate lock expiration without waiting.
|
||||
pub fn expire_lock_for_testing(&mut self, entity_id: Uuid) {
|
||||
if let Some(lock) = self.locks.get_mut(&entity_id) {
|
||||
lock.last_heartbeat = Instant::now() - Duration::from_secs(10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to release locks when entities are deselected
|
||||
///
|
||||
/// This system detects when entities are removed from selection and releases
|
||||
/// any locks held on those entities, broadcasting the release to other peers.
|
||||
///
|
||||
/// Add to your app as an Update system:
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::release_locks_on_deselection_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, release_locks_on_deselection_system);
|
||||
/// ```
|
||||
pub fn release_locks_on_deselection_system(
|
||||
mut registry: ResMut<EntityLockRegistry>,
|
||||
node_clock: Res<NodeVectorClock>,
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
mut selection_query: Query<&mut NetworkedSelection, Changed<NetworkedSelection>>,
|
||||
) {
|
||||
let node_id = node_clock.node_id;
|
||||
|
||||
for selection in selection_query.iter_mut() {
|
||||
// Find entities that were previously locked but are no longer selected
|
||||
let currently_selected: std::collections::HashSet<Uuid> = selection.selected_ids.clone();
|
||||
|
||||
// Check all locks held by this node
|
||||
let locks_to_release: Vec<Uuid> = registry
|
||||
.locks
|
||||
.iter()
|
||||
.filter(|(entity_id, lock)| {
|
||||
// Release if held by us and not currently selected
|
||||
lock.holder == node_id && !currently_selected.contains(entity_id)
|
||||
})
|
||||
.map(|(entity_id, _)| *entity_id)
|
||||
.collect();
|
||||
|
||||
// Release each lock and broadcast
|
||||
for entity_id in locks_to_release {
|
||||
if registry.release(entity_id, node_id) {
|
||||
debug!("Releasing lock on deselected entity {}", entity_id);
|
||||
|
||||
// Broadcast LockRelease
|
||||
if let Some(ref bridge) = bridge {
|
||||
let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockRelease {
|
||||
entity_id,
|
||||
node_id,
|
||||
}));
|
||||
|
||||
if let Err(e) = bridge.send(msg) {
|
||||
error!("Failed to broadcast LockRelease on deselection: {}", e);
|
||||
} else {
|
||||
info!("Lock released on deselection: entity {}", entity_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to clean up expired locks (crash recovery)
|
||||
///
|
||||
/// This system periodically removes locks that have exceeded their timeout
|
||||
/// duration (default 5 seconds). This provides crash recovery - if a **remote**
|
||||
/// node crashes while holding a lock, it will eventually expire.
|
||||
///
|
||||
/// **Important**: Only remote locks are cleaned up. Local locks (held by this node)
|
||||
/// are never timed out - they're held exactly as long as entities are selected,
|
||||
/// and only released via deselection.
|
||||
///
|
||||
/// Add to your app as an Update system:
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::cleanup_expired_locks_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, cleanup_expired_locks_system);
|
||||
/// ```
|
||||
pub fn cleanup_expired_locks_system(
|
||||
mut registry: ResMut<EntityLockRegistry>,
|
||||
node_clock: Res<NodeVectorClock>,
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
) {
|
||||
let node_id = node_clock.node_id;
|
||||
|
||||
// Only clean up REMOTE locks (locks held by other nodes)
|
||||
// Our own locks are managed by release_locks_on_deselection_system
|
||||
let expired: Vec<Uuid> = registry
|
||||
.locks
|
||||
.iter()
|
||||
.filter(|(_, lock)| {
|
||||
// Only expire locks held by OTHER nodes
|
||||
lock.is_expired() && lock.holder != node_id
|
||||
})
|
||||
.map(|(entity_id, _)| *entity_id)
|
||||
.collect();
|
||||
|
||||
if !expired.is_empty() {
|
||||
info!("Cleaning up {} expired remote locks", expired.len());
|
||||
|
||||
for entity_id in expired {
|
||||
debug!("Force-releasing expired remote lock on entity {}", entity_id);
|
||||
registry.force_release(entity_id);
|
||||
|
||||
// Broadcast LockReleased
|
||||
if let Some(ref bridge) = bridge {
|
||||
let msg =
|
||||
VersionedMessage::new(SyncMessage::Lock(LockMessage::LockReleased { entity_id }));
|
||||
|
||||
if let Err(e) = bridge.send(msg) {
|
||||
error!("Failed to broadcast LockReleased for expired lock: {}", e);
|
||||
} else {
|
||||
info!("Expired remote lock cleaned up: entity {}", entity_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to broadcast heartbeats for all locks we currently hold
|
||||
///
|
||||
/// This system runs periodically (~1/sec) and broadcasts a heartbeat for each
|
||||
/// lock this node holds. This keeps locks alive and provides crash detection -
|
||||
/// if a node crashes, heartbeats stop and locks expire after 5 seconds.
|
||||
///
|
||||
/// Add to your app as an Update system with a run condition to throttle it:
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use bevy::time::common_conditions::on_timer;
|
||||
/// use std::time::Duration;
|
||||
/// use libmarathon::networking::broadcast_lock_heartbeats_system;
|
||||
///
|
||||
/// App::new().add_systems(Update,
|
||||
/// broadcast_lock_heartbeats_system.run_if(on_timer(Duration::from_secs(1)))
|
||||
/// );
|
||||
/// ```
|
||||
pub fn broadcast_lock_heartbeats_system(
|
||||
mut registry: ResMut<EntityLockRegistry>,
|
||||
node_clock: Res<NodeVectorClock>,
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
) {
|
||||
let node_id = node_clock.node_id;
|
||||
|
||||
// Find all locks held by this node
|
||||
let our_locks: Vec<Uuid> = registry
|
||||
.locks
|
||||
.iter()
|
||||
.filter(|(_, lock)| lock.holder == node_id && !lock.is_expired())
|
||||
.map(|(entity_id, _)| *entity_id)
|
||||
.collect();
|
||||
|
||||
if our_locks.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
debug!("Broadcasting {} lock heartbeats", our_locks.len());
|
||||
|
||||
// Renew local locks and broadcast heartbeat for each lock
|
||||
for entity_id in &our_locks {
|
||||
// Renew the lock locally first (don't rely on network loopback)
|
||||
registry.renew_heartbeat(*entity_id, node_id);
|
||||
}
|
||||
|
||||
// Broadcast heartbeat messages to peers
|
||||
if let Some(ref bridge) = bridge {
|
||||
for entity_id in our_locks {
|
||||
let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockHeartbeat {
|
||||
entity_id,
|
||||
holder: node_id,
|
||||
}));
|
||||
|
||||
if let Err(e) = bridge.send(msg) {
|
||||
error!(
|
||||
"Failed to broadcast heartbeat for entity {}: {}",
|
||||
entity_id, e
|
||||
);
|
||||
} else {
|
||||
trace!("Heartbeat sent for locked entity {}", entity_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for EntityLockRegistry and the lock wire messages.
    use super::*;

    // Basic happy path: acquire, then all query helpers agree.
    #[test]
    fn test_lock_acquisition() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        // Should acquire successfully
        assert!(registry.try_acquire(entity_id, node_id).is_ok());
        assert!(registry.is_locked(entity_id, node_id));
        assert!(registry.is_locked_by(entity_id, node_id, node_id));
        assert_eq!(registry.get_holder(entity_id, node_id), Some(node_id));
    }

    // A second node's acquisition is rejected with the current holder.
    #[test]
    fn test_lock_conflict() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node1 = Uuid::new_v4();
        let node2 = Uuid::new_v4();

        // Node 1 acquires
        assert!(registry.try_acquire(entity_id, node1).is_ok());

        // Node 2 should be rejected
        assert_eq!(registry.try_acquire(entity_id, node2), Err(node1));
    }

    // Releasing a held lock removes it from the registry.
    #[test]
    fn test_lock_release() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        // Acquire and release
        registry.try_acquire(entity_id, node_id).unwrap();
        assert!(registry.release(entity_id, node_id));
        assert!(!registry.is_locked(entity_id, node_id));
    }

    // Only the holder may release a lock.
    #[test]
    fn test_wrong_node_cannot_release() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node1 = Uuid::new_v4();
        let node2 = Uuid::new_v4();

        // Node 1 acquires
        registry.try_acquire(entity_id, node1).unwrap();

        // Node 2 cannot release
        assert!(!registry.release(entity_id, node2));
        assert!(registry.is_locked(entity_id, node2));
        assert!(registry.is_locked_by(entity_id, node1, node2));
    }

    // A zero timeout makes the lock immediately show up as expired.
    #[test]
    fn test_lock_timeout() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        // Acquire with very short timeout
        registry.try_acquire(entity_id, node_id).unwrap();

        // Manually set timeout to 0 for testing
        if let Some(lock) = registry.locks.get_mut(&entity_id) {
            lock.timeout = Duration::from_secs(0);
        }

        // Should be detected as expired
        let expired = registry.get_expired_locks();
        assert_eq!(expired.len(), 1);
        assert_eq!(expired[0], entity_id);
    }

    // force_release drops the lock regardless of holder.
    #[test]
    fn test_force_release() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        registry.try_acquire(entity_id, node_id).unwrap();
        registry.force_release(entity_id);
        assert!(!registry.is_locked(entity_id, node_id));
    }

    // Acquisitions past MAX_LOCKS_PER_NODE are rejected.
    #[test]
    fn test_rate_limiting() {
        let mut registry = EntityLockRegistry::new();
        let node_id = Uuid::new_v4();

        // Acquire MAX_LOCKS_PER_NODE locks
        for _ in 0..MAX_LOCKS_PER_NODE {
            let entity_id = Uuid::new_v4();
            assert!(registry.try_acquire(entity_id, node_id).is_ok());
        }

        // Next acquisition should fail (rate limit)
        let entity_id = Uuid::new_v4();
        assert!(registry.try_acquire(entity_id, node_id).is_err());
    }

    // Per-node and total counters track acquisitions.
    #[test]
    fn test_node_lock_count() {
        let mut registry = EntityLockRegistry::new();
        let node_id = Uuid::new_v4();

        assert_eq!(registry.get_node_lock_count(node_id), 0);

        // Acquire 3 locks
        for _ in 0..3 {
            let entity_id = Uuid::new_v4();
            registry.try_acquire(entity_id, node_id).unwrap();
        }

        assert_eq!(registry.get_node_lock_count(node_id), 3);
        assert_eq!(registry.total_locks(), 3);
    }

    // Every LockMessage variant must round-trip through bincode.
    #[test]
    fn test_lock_message_serialization() {
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        let messages = vec![
            LockMessage::LockRequest { entity_id, node_id },
            LockMessage::LockAcquired {
                entity_id,
                holder: node_id,
            },
            LockMessage::LockRejected {
                entity_id,
                requester: node_id,
                current_holder: Uuid::new_v4(),
            },
            LockMessage::LockHeartbeat {
                entity_id,
                holder: node_id,
            },
            LockMessage::LockRelease { entity_id, node_id },
            LockMessage::LockReleased { entity_id },
        ];

        for message in messages {
            let bytes = bincode::serialize(&message).unwrap();
            let deserialized: LockMessage = bincode::deserialize(&bytes).unwrap();
            assert_eq!(message, deserialized);
        }
    }

    // renew_heartbeat pushes last_heartbeat forward in time.
    #[test]
    fn test_heartbeat_renewal() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        // Acquire lock
        registry.try_acquire(entity_id, node_id).unwrap();

        // Get initial heartbeat time
        let initial_heartbeat = registry.locks.get(&entity_id).unwrap().last_heartbeat;

        // Sleep a bit to ensure time difference
        std::thread::sleep(std::time::Duration::from_millis(10));

        // Renew heartbeat
        assert!(registry.renew_heartbeat(entity_id, node_id));

        // Check that heartbeat was updated
        let updated_heartbeat = registry.locks.get(&entity_id).unwrap().last_heartbeat;
        assert!(updated_heartbeat > initial_heartbeat);
    }

    // A non-holder cannot renew someone else's lock.
    #[test]
    fn test_heartbeat_wrong_node() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node1 = Uuid::new_v4();
        let node2 = Uuid::new_v4();

        // Node 1 acquires
        registry.try_acquire(entity_id, node1).unwrap();

        // Node 2 tries to renew heartbeat - should fail
        assert!(!registry.renew_heartbeat(entity_id, node2));
    }

    // With timeout forced to zero, the lock is reported as expired.
    #[test]
    fn test_heartbeat_expiration() {
        let mut registry = EntityLockRegistry::new();
        let entity_id = Uuid::new_v4();
        let node_id = Uuid::new_v4();

        // Acquire with very short timeout
        registry.try_acquire(entity_id, node_id).unwrap();

        // Manually set timeout to 0 for testing
        if let Some(lock) = registry.locks.get_mut(&entity_id) {
            lock.timeout = Duration::from_secs(0);
        }

        // Should be detected as expired
        let expired = registry.get_expired_locks();
        assert_eq!(expired.len(), 1);
        assert_eq!(expired[0], entity_id);
    }
}
|
||||
264
crates/libmarathon/src/networking/merge.rs
Normal file
264
crates/libmarathon/src/networking/merge.rs
Normal file
@@ -0,0 +1,264 @@
|
||||
//! CRDT merge logic for conflict resolution
|
||||
//!
|
||||
//! This module implements the merge semantics for different CRDT types:
|
||||
//! - Last-Write-Wins (LWW) for simple components
|
||||
//! - OR-Set for concurrent add/remove
|
||||
//! - Sequence CRDT (RGA) for ordered lists
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
operations::ComponentOp,
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
};
|
||||
|
||||
/// Result of comparing two operations for merge
///
/// Produced by [`compare_operations_lww`]; tells the caller which side of a
/// replicated write wins, or whether a CRDT-specific merge is needed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MergeDecision {
    /// The local operation wins (keep local, discard remote)
    KeepLocal,

    /// The remote operation wins (apply remote, discard local)
    ApplyRemote,

    /// Operations are concurrent, need CRDT-specific merge
    // Only reachable when the node-ID tiebreak cannot decide
    // (same node, concurrent clocks).
    Concurrent,

    /// Operations are identical
    Equal,
}
|
||||
|
||||
/// Compare two operations using vector clocks to determine merge decision
|
||||
///
|
||||
/// This implements Last-Write-Wins (LWW) semantics with node ID tiebreaking.
|
||||
///
|
||||
/// # Algorithm
|
||||
///
|
||||
/// 1. If local happened-before remote: ApplyRemote
|
||||
/// 2. If remote happened-before local: KeepLocal
|
||||
/// 3. If concurrent: use node ID as tiebreaker (higher node ID wins)
|
||||
/// 4. If equal: Equal
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{
|
||||
/// VectorClock,
|
||||
/// compare_operations_lww,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node1 = Uuid::new_v4();
|
||||
/// let node2 = Uuid::new_v4();
|
||||
///
|
||||
/// let mut clock1 = VectorClock::new();
|
||||
/// clock1.increment(node1);
|
||||
///
|
||||
/// let mut clock2 = VectorClock::new();
|
||||
/// clock2.increment(node2);
|
||||
///
|
||||
/// // Concurrent operations use node ID as tiebreaker
|
||||
/// let decision = compare_operations_lww(&clock1, node1, &clock2, node2);
|
||||
/// ```
|
||||
pub fn compare_operations_lww(
|
||||
local_clock: &VectorClock,
|
||||
local_node: NodeId,
|
||||
remote_clock: &VectorClock,
|
||||
remote_node: NodeId,
|
||||
) -> MergeDecision {
|
||||
// Check if clocks are equal
|
||||
if local_clock == remote_clock && local_node == remote_node {
|
||||
return MergeDecision::Equal;
|
||||
}
|
||||
|
||||
// Check happens-before relationship
|
||||
if local_clock.happened_before(remote_clock) {
|
||||
return MergeDecision::ApplyRemote;
|
||||
}
|
||||
|
||||
if remote_clock.happened_before(local_clock) {
|
||||
return MergeDecision::KeepLocal;
|
||||
}
|
||||
|
||||
// Concurrent operations - use node ID as tiebreaker
|
||||
// Higher node ID wins for deterministic resolution
|
||||
if remote_node > local_node {
|
||||
MergeDecision::ApplyRemote
|
||||
} else if local_node > remote_node {
|
||||
MergeDecision::KeepLocal
|
||||
} else {
|
||||
MergeDecision::Concurrent
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine if a remote Set operation should be applied
|
||||
///
|
||||
/// This is a convenience wrapper around `compare_operations_lww` for Set
|
||||
/// operations specifically.
|
||||
pub fn should_apply_set(local_op: &ComponentOp, remote_op: &ComponentOp) -> bool {
|
||||
// Extract vector clocks and node IDs
|
||||
let (local_clock, local_data) = match local_op {
|
||||
| ComponentOp::Set {
|
||||
vector_clock, data, ..
|
||||
} => (vector_clock, data),
|
||||
| _ => return false,
|
||||
};
|
||||
|
||||
let (remote_clock, remote_data) = match remote_op {
|
||||
| ComponentOp::Set {
|
||||
vector_clock, data, ..
|
||||
} => (vector_clock, data),
|
||||
| _ => return false,
|
||||
};
|
||||
|
||||
// If data is identical, no need to apply
|
||||
if local_data == remote_data {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Use the sequence number from the clocks as a simple tiebreaker
|
||||
// In a real implementation, we'd use the full node IDs
|
||||
let local_seq: u64 = local_clock.clocks.values().sum();
|
||||
let remote_seq: u64 = remote_clock.clocks.values().sum();
|
||||
|
||||
// Compare clocks
|
||||
match compare_operations_lww(
|
||||
local_clock,
|
||||
uuid::Uuid::nil(), // Simplified - would use actual node IDs
|
||||
remote_clock,
|
||||
uuid::Uuid::nil(),
|
||||
) {
|
||||
| MergeDecision::ApplyRemote => true,
|
||||
| MergeDecision::KeepLocal => false,
|
||||
| MergeDecision::Concurrent => remote_seq > local_seq,
|
||||
| MergeDecision::Equal => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Log a merge conflict for debugging
///
/// Emits a single `info!` line recording both vector clocks and the chosen
/// resolution, so concurrent-operation handling can be traced in the logs.
///
/// # Arguments
///
/// * `component_type` - Type path of the component that conflicted
/// * `local_clock` - Vector clock of the local operation
/// * `remote_clock` - Vector clock of the remote operation
/// * `decision` - The resolution chosen by the merge logic
pub fn log_merge_conflict(
    component_type: &str,
    local_clock: &VectorClock,
    remote_clock: &VectorClock,
    decision: MergeDecision,
) {
    info!(
        "Merge conflict on {}: local={:?}, remote={:?}, decision={:?}",
        component_type, local_clock, remote_clock, decision
    );
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::networking::messages::ComponentData;

    #[test]
    fn test_lww_happened_before() {
        let node_a = uuid::Uuid::new_v4();
        let node_b = uuid::Uuid::new_v4();

        let mut older = VectorClock::new();
        older.increment(node_a);

        let mut newer = VectorClock::new();
        newer.increment(node_a);
        newer.increment(node_a);

        // older happened-before newer: the remote (newer) side must win.
        assert_eq!(
            compare_operations_lww(&older, node_a, &newer, node_b),
            MergeDecision::ApplyRemote
        );

        // Reversed roles: the local (newer) side must win.
        assert_eq!(
            compare_operations_lww(&newer, node_a, &older, node_b),
            MergeDecision::KeepLocal
        );
    }

    #[test]
    fn test_lww_concurrent() {
        let node_a = uuid::Uuid::new_v4();
        let node_b = uuid::Uuid::new_v4();

        let mut clock_a = VectorClock::new();
        clock_a.increment(node_a);

        let mut clock_b = VectorClock::new();
        clock_b.increment(node_b);

        // Neither clock dominates, so the node-ID tiebreak must pick a side.
        let decision = compare_operations_lww(&clock_a, node_a, &clock_b, node_b);
        assert!(matches!(
            decision,
            MergeDecision::ApplyRemote | MergeDecision::KeepLocal
        ));
    }

    #[test]
    fn test_lww_equal() {
        let node_a = uuid::Uuid::new_v4();

        let mut clock_a = VectorClock::new();
        clock_a.increment(node_a);

        let clock_b = clock_a.clone();

        // Same clock, same node: identical operations.
        assert_eq!(
            compare_operations_lww(&clock_a, node_a, &clock_b, node_a),
            MergeDecision::Equal
        );
    }

    #[test]
    fn test_should_apply_set_same_data() {
        let node_id = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();
        clock.increment(node_id);

        let payload = vec![1, 2, 3];
        let make_op = |vector_clock: VectorClock| ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(payload.clone()),
            vector_clock,
        };

        let op1 = make_op(clock.clone());
        let op2 = make_op(clock);

        // Identical payloads never need re-applying.
        assert!(!should_apply_set(&op1, &op2));
    }

    #[test]
    fn test_should_apply_set_newer_wins() {
        let node_id = uuid::Uuid::new_v4();

        let mut older = VectorClock::new();
        older.increment(node_id);

        let mut newer = VectorClock::new();
        newer.increment(node_id);
        newer.increment(node_id);

        let old_op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: older,
        };

        let new_op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![4, 5, 6]),
            vector_clock: newer,
        };

        // The causally newer operation wins in both directions.
        assert!(should_apply_set(&old_op, &new_op));
        assert!(!should_apply_set(&new_op, &old_op));
    }
}
|
||||
537
crates/libmarathon/src/networking/message_dispatcher.rs
Normal file
537
crates/libmarathon/src/networking/message_dispatcher.rs
Normal file
@@ -0,0 +1,537 @@
|
||||
//! Message dispatcher for efficient message routing
|
||||
//!
|
||||
//! This module eliminates the DRY violation and O(n²) behavior from having
|
||||
//! multiple systems each polling the same message queue. Instead, a single
|
||||
//! dispatcher system polls once and routes messages to appropriate handlers.
|
||||
|
||||
use bevy::{
|
||||
ecs::system::SystemState,
|
||||
prelude::*,
|
||||
};
|
||||
|
||||
use crate::networking::{
|
||||
GossipBridge,
|
||||
JoinType,
|
||||
NetworkedEntity,
|
||||
TombstoneRegistry,
|
||||
VersionedMessage,
|
||||
apply_entity_delta,
|
||||
apply_full_state,
|
||||
blob_support::BlobStore,
|
||||
build_missing_deltas,
|
||||
delta_generation::NodeVectorClock,
|
||||
entity_map::NetworkEntityMap,
|
||||
messages::SyncMessage,
|
||||
operation_log::OperationLog,
|
||||
plugin::SessionSecret,
|
||||
validate_session_secret,
|
||||
};
|
||||
|
||||
/// Central message dispatcher system
|
||||
///
|
||||
/// This system replaces the individual message polling loops in:
|
||||
/// - `receive_and_apply_deltas_system`
|
||||
/// - `handle_join_requests_system`
|
||||
/// - `handle_full_state_system`
|
||||
/// - `handle_sync_requests_system`
|
||||
/// - `handle_missing_deltas_system`
|
||||
///
|
||||
/// By polling the message queue once and routing to handlers, we eliminate
|
||||
/// O(n²) behavior and code duplication.
|
||||
///
|
||||
/// # Performance
|
||||
///
|
||||
/// - **Before**: O(n × m) where n = messages, m = systems (~5)
|
||||
/// - **After**: O(n) - each message processed exactly once
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::message_dispatcher_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, message_dispatcher_system);
|
||||
/// ```
|
||||
pub fn message_dispatcher_system(world: &mut World) {
|
||||
// This is an exclusive system to avoid parameter conflicts with world access
|
||||
// Check if bridge exists
|
||||
if world.get_resource::<GossipBridge>().is_none() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Atomically drain all pending messages from the incoming queue
|
||||
// This prevents race conditions where messages could arrive between individual
|
||||
// try_recv() calls
|
||||
let messages: Vec<crate::networking::VersionedMessage> = {
|
||||
let bridge = world.resource::<GossipBridge>();
|
||||
bridge.drain_incoming()
|
||||
};
|
||||
|
||||
// Dispatch each message (bridge is no longer borrowed)
|
||||
for message in messages {
|
||||
dispatch_message(world, message);
|
||||
}
|
||||
|
||||
// Flush all queued commands to ensure components are inserted immediately
|
||||
world.flush();
|
||||
}
|
||||
|
||||
/// Helper function to dispatch a single message
/// This is separate to allow proper borrowing of world resources
///
/// Routes one already-drained [`VersionedMessage`] to the handler for its
/// [`SyncMessage`] variant: applying deltas, answering join/sync requests,
/// ingesting full state, and processing the entity-lock protocol.
///
/// NOTE(review): in the `Lock` arm, `registry` is obtained via
/// `get_resource_mut` (a mutable world borrow) and `world.get_resource::<GossipBridge>()`
/// is called while that borrow appears to still be live — confirm this
/// borrow-checks as written or whether `resource_scope` is needed.
fn dispatch_message(world: &mut World, message: crate::networking::VersionedMessage) {
    match message.message {
        // EntityDelta - apply remote operations
        | SyncMessage::EntityDelta {
            entity_id,
            node_id,
            vector_clock,
            operations,
        } => {
            // Re-wrap the variant fields as a standalone EntityDelta struct
            // so the shared apply path can be reused.
            let delta = crate::networking::EntityDelta {
                entity_id,
                node_id,
                vector_clock,
                operations,
            };

            debug!(
                "Received EntityDelta for entity {:?} with {} operations",
                delta.entity_id,
                delta.operations.len()
            );

            apply_entity_delta(&delta, world);
        },

        // JoinRequest - new peer joining (or rejoining)
        | SyncMessage::JoinRequest {
            node_id,
            session_id,
            session_secret,
            last_known_clock,
            join_type,
        } => {
            info!(
                "Received JoinRequest from node {} for session {} (type: {:?})",
                node_id, session_id, join_type
            );

            // Validate session secret if configured
            if let Some(expected) = world.get_resource::<SessionSecret>() {
                match &session_secret {
                    | Some(provided_secret) => {
                        if let Err(e) =
                            validate_session_secret(provided_secret, expected.as_bytes())
                        {
                            error!("JoinRequest from {} rejected: {}", node_id, e);
                            return; // Stop processing this message
                        }
                        info!("Session secret validated for node {}", node_id);
                    },
                    | None => {
                        warn!(
                            "JoinRequest from {} missing required session secret, rejecting",
                            node_id
                        );
                        return; // Reject requests without secret when one is configured
                    },
                }
            } else if session_secret.is_some() {
                // No session secret configured but peer provided one
                debug!("Session secret provided but none configured, accepting");
            }

            // Hybrid join protocol: decide between FullState and MissingDeltas
            // Fresh joins always get FullState
            // Rejoins get deltas if <1000 operations, otherwise FullState
            let response = match (&join_type, &last_known_clock) {
                // Fresh join or no clock provided → send FullState
                | (JoinType::Fresh, _) | (_, None) => {
                    info!("Fresh join from node {} - sending FullState", node_id);

                    // Collect networked entities
                    // (query needs &mut World, so scope it before the
                    // immutable resource borrows below)
                    let networked_entities = {
                        let mut query = world.query::<(Entity, &NetworkedEntity)>();
                        query.iter(world).collect::<Vec<_>>()
                    };

                    // Build full state
                    let type_registry = world.resource::<AppTypeRegistry>().read();
                    let node_clock = world.resource::<NodeVectorClock>();
                    let blob_store = world.get_resource::<BlobStore>();

                    build_full_state_from_data(
                        world,
                        &networked_entities,
                        &type_registry,
                        &node_clock,
                        blob_store.map(|b| b as &BlobStore),
                    )
                },

                // Rejoin with known clock → check delta count
                | (JoinType::Rejoin { .. }, Some(their_clock)) => {
                    info!(
                        "Rejoin from node {} - checking delta count since last known clock",
                        node_id
                    );

                    // Get operation log and check missing deltas
                    let operation_log = world.resource::<crate::networking::OperationLog>();
                    let missing_deltas =
                        operation_log.get_all_operations_newer_than(their_clock);

                    // If delta count is small (<= 1000 ops), send deltas
                    // Otherwise fall back to full state
                    if missing_deltas.len() <= 1000 {
                        info!(
                            "Rejoin from node {} - sending {} MissingDeltas (efficient rejoin)",
                            node_id,
                            missing_deltas.len()
                        );

                        VersionedMessage::new(SyncMessage::MissingDeltas {
                            deltas: missing_deltas,
                        })
                    } else {
                        info!(
                            "Rejoin from node {} - delta count {} exceeds threshold, sending FullState",
                            node_id,
                            missing_deltas.len()
                        );

                        // Collect networked entities
                        let networked_entities = {
                            let mut query = world.query::<(Entity, &NetworkedEntity)>();
                            query.iter(world).collect::<Vec<_>>()
                        };

                        // Build full state
                        let type_registry = world.resource::<AppTypeRegistry>().read();
                        let node_clock = world.resource::<NodeVectorClock>();
                        let blob_store = world.get_resource::<BlobStore>();

                        build_full_state_from_data(
                            world,
                            &networked_entities,
                            &type_registry,
                            &node_clock,
                            blob_store.map(|b| b as &BlobStore),
                        )
                    }
                },
            };

            // Send response
            if let Some(bridge) = world.get_resource::<GossipBridge>() {
                if let Err(e) = bridge.send(response) {
                    error!("Failed to send join response: {}", e);
                } else {
                    info!("Sent join response to node {}", node_id);
                }
            }
        },

        // FullState - receiving world state after join
        | SyncMessage::FullState {
            entities,
            vector_clock,
        } => {
            info!("Received FullState with {} entities", entities.len());

            // Use SystemState to properly borrow multiple resources
            let mut system_state: SystemState<(
                Commands,
                ResMut<NetworkEntityMap>,
                Res<AppTypeRegistry>,
                ResMut<NodeVectorClock>,
                Option<Res<BlobStore>>,
                Option<ResMut<TombstoneRegistry>>,
            )> = SystemState::new(world);

            {
                let (
                    mut commands,
                    mut entity_map,
                    type_registry,
                    mut node_clock,
                    blob_store,
                    mut tombstone_registry,
                ) = system_state.get_mut(world);
                let registry = type_registry.read();

                apply_full_state(
                    entities,
                    vector_clock,
                    &mut commands,
                    &mut entity_map,
                    &registry,
                    &mut node_clock,
                    blob_store.as_deref(),
                    tombstone_registry.as_deref_mut(),
                );
                // registry is dropped here
            }

            // Apply deferred Commands back to the world.
            system_state.apply(world);
        },

        // SyncRequest - peer requesting missing operations
        | SyncMessage::SyncRequest {
            node_id: requesting_node,
            vector_clock: their_clock,
        } => {
            debug!("Received SyncRequest from node {}", requesting_node);

            if let Some(op_log) = world.get_resource::<OperationLog>() {
                // Find operations they're missing
                let missing_deltas = op_log.get_all_operations_newer_than(&their_clock);

                if !missing_deltas.is_empty() {
                    info!(
                        "Sending {} missing deltas to node {}",
                        missing_deltas.len(),
                        requesting_node
                    );

                    // Send MissingDeltas response
                    let response = build_missing_deltas(missing_deltas);
                    if let Some(bridge) = world.get_resource::<GossipBridge>() {
                        if let Err(e) = bridge.send(response) {
                            error!("Failed to send MissingDeltas: {}", e);
                        }
                    }
                } else {
                    debug!("No missing deltas for node {}", requesting_node);
                }
            } else {
                warn!("Received SyncRequest but OperationLog resource not available");
            }
        },

        // MissingDeltas - receiving operations we requested
        | SyncMessage::MissingDeltas { deltas } => {
            info!("Received MissingDeltas with {} operations", deltas.len());

            // Apply each delta
            for delta in deltas {
                debug!("Applying missing delta for entity {:?}", delta.entity_id);

                apply_entity_delta(&delta, world);
            }
        },

        // Lock - entity lock protocol messages
        | SyncMessage::Lock(lock_msg) => {
            use crate::networking::LockMessage;

            if let Some(mut registry) = world.get_resource_mut::<crate::networking::EntityLockRegistry>() {
                match lock_msg {
                    | LockMessage::LockRequest { entity_id, node_id } => {
                        debug!("Received LockRequest for entity {} from node {}", entity_id, node_id);

                        match registry.try_acquire(entity_id, node_id) {
                            | Ok(()) => {
                                // Acquired successfully - broadcast confirmation
                                if let Some(bridge) = world.get_resource::<GossipBridge>() {
                                    let msg = VersionedMessage::new(SyncMessage::Lock(
                                        LockMessage::LockAcquired {
                                            entity_id,
                                            holder: node_id,
                                        },
                                    ));
                                    if let Err(e) = bridge.send(msg) {
                                        error!("Failed to broadcast LockAcquired: {}", e);
                                    } else {
                                        info!("Lock acquired: entity {} by node {}", entity_id, node_id);
                                    }
                                }
                            },
                            | Err(current_holder) => {
                                // Already locked - send rejection
                                if let Some(bridge) = world.get_resource::<GossipBridge>() {
                                    let msg = VersionedMessage::new(SyncMessage::Lock(
                                        LockMessage::LockRejected {
                                            entity_id,
                                            requester: node_id,
                                            current_holder,
                                        },
                                    ));
                                    if let Err(e) = bridge.send(msg) {
                                        error!("Failed to send LockRejected: {}", e);
                                    } else {
                                        debug!("Lock rejected: entity {} requested by {} (held by {})",
                                            entity_id, node_id, current_holder);
                                    }
                                }
                            },
                        }
                    },

                    | LockMessage::LockAcquired { entity_id, holder } => {
                        debug!("Received LockAcquired for entity {} by node {}", entity_id, holder);
                        // Lock already applied optimistically, just log confirmation
                    },

                    | LockMessage::LockRejected {
                        entity_id,
                        requester,
                        current_holder,
                    } => {
                        warn!(
                            "Lock rejected: entity {} requested by {} (held by {})",
                            entity_id, requester, current_holder
                        );
                        // Could trigger UI notification here
                    },

                    | LockMessage::LockHeartbeat { entity_id, holder } => {
                        trace!("Received LockHeartbeat for entity {} from node {}", entity_id, holder);

                        // Renew the lock's heartbeat timestamp
                        if registry.renew_heartbeat(entity_id, holder) {
                            trace!("Lock heartbeat renewed: entity {} by node {}", entity_id, holder);
                        } else {
                            debug!(
                                "Received heartbeat for entity {} from {}, but lock not found or holder mismatch",
                                entity_id, holder
                            );
                        }
                    },

                    | LockMessage::LockRelease { entity_id, node_id } => {
                        debug!("Received LockRelease for entity {} from node {}", entity_id, node_id);

                        if registry.release(entity_id, node_id) {
                            // Broadcast confirmation
                            if let Some(bridge) = world.get_resource::<GossipBridge>() {
                                let msg = VersionedMessage::new(SyncMessage::Lock(
                                    LockMessage::LockReleased { entity_id },
                                ));
                                if let Err(e) = bridge.send(msg) {
                                    error!("Failed to broadcast LockReleased: {}", e);
                                } else {
                                    info!("Lock released: entity {}", entity_id);
                                }
                            }
                        }
                    },

                    | LockMessage::LockReleased { entity_id } => {
                        debug!("Received LockReleased for entity {}", entity_id);
                        // Lock already released locally, just log confirmation
                    },
                }
            } else {
                warn!("Received lock message but EntityLockRegistry not available");
            }
        },
    }
}
|
||||
|
||||
/// Helper to build full state from collected data
///
/// Serializes every reflectable component of each non-tombstoned networked
/// entity and wraps the result in a `FullState` message stamped with this
/// node's current vector clock.
///
/// # Arguments
///
/// * `world` - World to read entity components from (immutable)
/// * `networked_entities` - Pre-collected `(Entity, &NetworkedEntity)` pairs
/// * `type_registry` - Reflection registry used to find and serialize components
/// * `node_clock` - This node's vector clock, cloned into every entity state
/// * `blob_store` - When present, large components are uploaded as blobs
fn build_full_state_from_data(
    world: &World,
    networked_entities: &[(Entity, &NetworkedEntity)],
    type_registry: &bevy::reflect::TypeRegistry,
    node_clock: &NodeVectorClock,
    blob_store: Option<&BlobStore>,
) -> crate::networking::VersionedMessage {
    use crate::{
        networking::{
            blob_support::create_component_data,
            messages::{
                ComponentState,
                EntityState,
            },
        },
        persistence::reflection::serialize_component,
    };

    // Get tombstone registry to filter out deleted entities
    let tombstone_registry = world.get_resource::<crate::networking::TombstoneRegistry>();

    let mut entities = Vec::new();

    for (entity, networked) in networked_entities {
        // Skip tombstoned entities to prevent resurrection on joining nodes
        if let Some(registry) = &tombstone_registry {
            if registry.is_deleted(networked.network_id) {
                debug!(
                    "Skipping tombstoned entity {:?} in full state build",
                    networked.network_id
                );
                continue;
            }
        }
        let entity_ref = world.entity(*entity);
        let mut components = Vec::new();

        // Iterate over all type registrations to find components
        for registration in type_registry.iter() {
            // Skip if no ReflectComponent data
            let Some(reflect_component) = registration.data::<ReflectComponent>() else {
                continue;
            };

            let type_path = registration.type_info().type_path();

            // Skip networked wrapper components
            if type_path.ends_with("::NetworkedEntity") ||
                type_path.ends_with("::NetworkedTransform") ||
                type_path.ends_with("::NetworkedSelection") ||
                type_path.ends_with("::NetworkedDrawingPath")
            {
                continue;
            }

            // Try to reflect this component from the entity
            if let Some(reflected) = reflect_component.reflect(entity_ref) {
                // Serialize the component
                // (serialization failures silently skip the component)
                if let Ok(serialized) = serialize_component(reflected, type_registry) {
                    // Create component data (inline or blob)
                    // NOTE(review): blob-store errors also silently drop the
                    // component — confirm this best-effort behavior is intended.
                    let data = if let Some(store) = blob_store {
                        match create_component_data(serialized, store) {
                            | Ok(d) => d,
                            | Err(_) => continue,
                        }
                    } else {
                        crate::networking::ComponentData::Inline(serialized)
                    };

                    components.push(ComponentState {
                        component_type: type_path.to_string(),
                        data,
                    });
                }
            }
        }

        entities.push(EntityState {
            entity_id: networked.network_id,
            owner_node_id: networked.owner_node_id,
            vector_clock: node_clock.clock.clone(),
            components,
            is_deleted: false,
        });
    }

    info!(
        "Built FullState with {} entities for new peer",
        entities.len()
    );

    crate::networking::VersionedMessage::new(SyncMessage::FullState {
        entities,
        vector_clock: node_clock.clock.clone(),
    })
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Smoke test only: the dispatcher is an exclusive Bevy system, so real
    // coverage lives in integration tests with a full App.
    #[test]
    fn test_message_dispatcher_compiles() {
        // This test just ensures the dispatcher system compiles
        // Integration tests would require a full Bevy app setup
    }
}
|
||||
517
crates/libmarathon/src/networking/messages.rs
Normal file
517
crates/libmarathon/src/networking/messages.rs
Normal file
@@ -0,0 +1,517 @@
|
||||
//! Network message types for CRDT synchronization
|
||||
//!
|
||||
//! This module defines the protocol messages used for distributed
|
||||
//! synchronization according to RFC 0001.
|
||||
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::{
|
||||
locks::LockMessage,
|
||||
operations::ComponentOp,
|
||||
session::SessionId,
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
};
|
||||
|
||||
/// Top-level message envelope with versioning
///
/// All messages sent over the network are wrapped in this envelope to support
/// protocol version negotiation and future compatibility. Construct via
/// [`VersionedMessage::new`] to stamp the current protocol version.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionedMessage {
    /// Protocol version (currently 1)
    pub version: u32,

    /// The actual sync message
    pub message: SyncMessage,
}
|
||||
|
||||
impl VersionedMessage {
|
||||
/// Current protocol version
|
||||
pub const CURRENT_VERSION: u32 = 1;
|
||||
|
||||
/// Create a new versioned message with the current protocol version
|
||||
pub fn new(message: SyncMessage) -> Self {
|
||||
Self {
|
||||
version: Self::CURRENT_VERSION,
|
||||
message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Join request type - distinguishes fresh joins from rejoin attempts
///
/// Carried inside `SyncMessage::JoinRequest`; the responder uses this to
/// choose between sending full state and sending only missing deltas.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum JoinType {
    /// Fresh join - never connected to this session before
    Fresh,

    /// Rejoin - returning to a session we left earlier
    Rejoin {
        /// When we were last active in this session (Unix timestamp)
        last_active: i64,

        /// Cached entity count from when we left
        entity_count: usize,
    },
}
|
||||
|
||||
/// CRDT synchronization protocol messages
///
/// These messages implement the sync protocol defined in RFC 0001.
///
/// # Protocol Flow
///
/// 1. **Join**: New peer sends `JoinRequest`, receives `FullState`
/// 2. **Normal Operation**: Peers broadcast `EntityDelta` on changes
/// 3. **Anti-Entropy**: Periodic `SyncRequest` to detect missing operations
/// 4. **Recovery**: `MissingDeltas` sent in response to `SyncRequest`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SyncMessage {
    /// Request to join the network and receive full state
    ///
    /// Sent by a new peer when it first connects. For fresh joins, the response
    /// will be a `FullState` message. For rejoins with small deltas (<1000 ops),
    /// the response will be `MissingDeltas`.
    JoinRequest {
        /// ID of the node requesting to join
        node_id: NodeId,

        /// Session ID to join
        session_id: SessionId,

        /// Optional session secret for authentication
        session_secret: Option<Vec<u8>>,

        /// Vector clock from when we last left this session
        /// None = fresh join, Some = rejoin
        last_known_clock: Option<VectorClock>,

        /// Type of join (fresh or rejoin with metadata)
        join_type: JoinType,
    },

    /// Complete world state sent to new peers
    ///
    /// Contains all networked entities and their components. Sent in response
    /// to a `JoinRequest`.
    FullState {
        /// All entities in the world
        entities: Vec<EntityState>,

        /// Current vector clock of the sending node
        vector_clock: VectorClock,
    },

    /// Delta update for a single entity
    ///
    /// Broadcast when a component changes. Recipients apply the operations
    /// using CRDT merge semantics.
    EntityDelta {
        /// Network ID of the entity being updated
        entity_id: uuid::Uuid,

        /// Node that generated this delta
        node_id: NodeId,

        /// Vector clock at the time this delta was created
        vector_clock: VectorClock,

        /// Component operations (Set, SetAdd, SequenceInsert, etc.)
        operations: Vec<ComponentOp>,
    },

    /// Request for operations newer than our vector clock
    ///
    /// Sent periodically for anti-entropy. The recipient compares vector
    /// clocks and sends `MissingDeltas` if they have newer operations.
    SyncRequest {
        /// ID of the node requesting sync
        node_id: NodeId,

        /// Our current vector clock
        vector_clock: VectorClock,
    },

    /// Operations that the recipient is missing
    ///
    /// Sent in response to `SyncRequest` when we have operations the peer
    /// doesn't know about yet.
    MissingDeltas {
        /// Entity deltas that the recipient is missing
        deltas: Vec<EntityDelta>,
    },

    /// Entity lock protocol messages
    ///
    /// Used for collaborative editing to prevent concurrent modifications.
    /// Locks are acquired when entities are selected and released when deselected.
    Lock(LockMessage),
}
|
||||
|
||||
/// Complete state of a single entity
///
/// Used in `FullState` messages to transfer all components of an entity.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityState {
    /// Network ID of the entity
    pub entity_id: uuid::Uuid,

    /// Node that originally created this entity
    pub owner_node_id: NodeId,

    /// Vector clock when this entity was last updated
    pub vector_clock: VectorClock,

    /// All components on this entity
    pub components: Vec<ComponentState>,

    /// Whether this entity has been deleted (tombstone)
    pub is_deleted: bool,
}
|
||||
|
||||
/// State of a single component
///
/// Contains the component type and its serialized data, as carried inside
/// an [`EntityState`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComponentState {
    /// Type path of the component (e.g.,
    /// "bevy_transform::components::Transform")
    pub component_type: String,

    /// Serialized component data (bincode)
    pub data: ComponentData,
}
|
||||
|
||||
/// Component data - either inline or a blob reference
///
/// Components larger than 64KB are stored as blobs and referenced by hash;
/// smaller payloads travel inline in the message itself.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ComponentData {
    /// Inline data for small components (<64KB)
    Inline(Vec<u8>),

    /// Reference to a blob for large components (>64KB)
    BlobRef {
        /// iroh-blobs hash
        hash: Vec<u8>,

        /// Size of the blob in bytes
        size: u64,
    },
}
|
||||
|
||||
impl ComponentData {
|
||||
/// Threshold for using blobs vs inline data (64KB)
|
||||
pub const BLOB_THRESHOLD: usize = 64 * 1024;
|
||||
|
||||
/// Create component data, automatically choosing inline vs blob
|
||||
pub fn new(data: Vec<u8>) -> Self {
|
||||
if data.len() > Self::BLOB_THRESHOLD {
|
||||
// Will be populated later when uploaded to iroh-blobs
|
||||
Self::BlobRef {
|
||||
hash: Vec::new(),
|
||||
size: data.len() as u64,
|
||||
}
|
||||
} else {
|
||||
Self::Inline(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this is a blob reference
|
||||
pub fn is_blob(&self) -> bool {
|
||||
matches!(self, ComponentData::BlobRef { .. })
|
||||
}
|
||||
|
||||
/// Get inline data, if available
|
||||
pub fn as_inline(&self) -> Option<&[u8]> {
|
||||
match self {
|
||||
| ComponentData::Inline(data) => Some(data),
|
||||
| _ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get blob reference, if this is a blob
|
||||
pub fn as_blob_ref(&self) -> Option<(&[u8], u64)> {
|
||||
match self {
|
||||
| ComponentData::BlobRef { hash, size } => Some((hash, *size)),
|
||||
| _ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for EntityDelta to allow it to be used directly
///
/// This struct exists because EntityDelta is defined as an enum variant
/// but we sometimes need to work with it as a standalone type. Fields
/// mirror `SyncMessage::EntityDelta` exactly; convert with
/// `EntityDelta::into_message`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityDelta {
    /// Network ID of the entity being updated
    pub entity_id: uuid::Uuid,

    /// Node that generated this delta
    pub node_id: NodeId,

    /// Vector clock at the time this delta was created
    pub vector_clock: VectorClock,

    /// Component operations (Set, SetAdd, SequenceInsert, etc.)
    pub operations: Vec<ComponentOp>,
}
|
||||
|
||||
impl EntityDelta {
|
||||
/// Create a new entity delta
|
||||
pub fn new(
|
||||
entity_id: uuid::Uuid,
|
||||
node_id: NodeId,
|
||||
vector_clock: VectorClock,
|
||||
operations: Vec<ComponentOp>,
|
||||
) -> Self {
|
||||
Self {
|
||||
entity_id,
|
||||
node_id,
|
||||
vector_clock,
|
||||
operations,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert to a SyncMessage::EntityDelta variant
|
||||
pub fn into_message(self) -> SyncMessage {
|
||||
SyncMessage::EntityDelta {
|
||||
entity_id: self.entity_id,
|
||||
node_id: self.node_id,
|
||||
vector_clock: self.vector_clock,
|
||||
operations: self.operations,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a minimal fresh JoinRequest for the given node and session.
    fn fresh_join(node_id: uuid::Uuid, session_id: SessionId) -> SyncMessage {
        SyncMessage::JoinRequest {
            node_id,
            session_id,
            session_secret: None,
            last_known_clock: None,
            join_type: JoinType::Fresh,
        }
    }

    #[test]
    fn test_versioned_message_creation() {
        let msg = fresh_join(uuid::Uuid::new_v4(), SessionId::new());
        let wrapped = VersionedMessage::new(msg);
        assert_eq!(wrapped.version, VersionedMessage::CURRENT_VERSION);
    }

    #[test]
    fn test_component_data_inline() {
        let bytes = vec![1, 2, 3, 4];
        let component_data = ComponentData::new(bytes.clone());

        assert!(!component_data.is_blob());
        assert_eq!(component_data.as_inline(), Some(bytes.as_slice()));
    }

    #[test]
    fn test_component_data_blob() {
        // One byte past the cutoff must force blob storage.
        let big = vec![0u8; ComponentData::BLOB_THRESHOLD + 1];
        let component_data = ComponentData::new(big);

        assert!(component_data.is_blob());
        assert_eq!(component_data.as_inline(), None);
    }

    #[test]
    fn test_entity_delta_creation() {
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock.clone(), vec![]);

        assert_eq!(delta.entity_id, entity_id);
        assert_eq!(delta.node_id, node_id);
        assert_eq!(delta.vector_clock, clock);
    }

    #[test]
    fn test_message_serialization() -> bincode::Result<()> {
        let wrapped = VersionedMessage::new(fresh_join(uuid::Uuid::new_v4(), SessionId::new()));

        let encoded = bincode::serialize(&wrapped)?;
        let decoded: VersionedMessage = bincode::deserialize(&encoded)?;

        assert_eq!(decoded.version, wrapped.version);
        Ok(())
    }

    #[test]
    fn test_full_state_serialization() -> bincode::Result<()> {
        let state = EntityState {
            entity_id: uuid::Uuid::new_v4(),
            owner_node_id: uuid::Uuid::new_v4(),
            vector_clock: VectorClock::new(),
            components: vec![],
            is_deleted: false,
        };

        let msg = SyncMessage::FullState {
            entities: vec![state],
            vector_clock: VectorClock::new(),
        };

        let encoded = bincode::serialize(&msg)?;
        let _round_trip: SyncMessage = bincode::deserialize(&encoded)?;
        Ok(())
    }

    #[test]
    fn test_join_type_fresh() {
        // Fresh join should survive a serialization round-trip.
        let encoded = bincode::serialize(&JoinType::Fresh).unwrap();
        let decoded: JoinType = bincode::deserialize(&encoded).unwrap();

        assert!(matches!(decoded, JoinType::Fresh));
    }

    #[test]
    fn test_join_type_rejoin() {
        let original = JoinType::Rejoin {
            last_active: 1234567890,
            entity_count: 42,
        };

        // Rejoin should survive a serialization round-trip with fields intact.
        let encoded = bincode::serialize(&original).unwrap();
        let decoded: JoinType = bincode::deserialize(&encoded).unwrap();

        let JoinType::Rejoin {
            last_active,
            entity_count,
        } = decoded
        else {
            panic!("Expected JoinType::Rejoin");
        };
        assert_eq!(last_active, 1234567890);
        assert_eq!(entity_count, 42);
    }

    #[test]
    fn test_hybrid_join_protocol_fresh() {
        // A fresh join carries no last_known_clock.
        let msg = fresh_join(uuid::Uuid::new_v4(), SessionId::new());

        let encoded = bincode::serialize(&msg).unwrap();
        let decoded: SyncMessage = bincode::deserialize(&encoded).unwrap();

        let SyncMessage::JoinRequest {
            join_type,
            last_known_clock,
            ..
        } = decoded
        else {
            panic!("Expected JoinRequest");
        };
        assert!(matches!(join_type, JoinType::Fresh));
        assert!(last_known_clock.is_none());
    }

    #[test]
    fn test_hybrid_join_protocol_rejoin() {
        // A rejoin carries the node's last known clock.
        let clock = VectorClock::new();
        let msg = SyncMessage::JoinRequest {
            node_id: uuid::Uuid::new_v4(),
            session_id: SessionId::new(),
            session_secret: None,
            last_known_clock: Some(clock.clone()),
            join_type: JoinType::Rejoin {
                last_active: 1234567890,
                entity_count: 100,
            },
        };

        let encoded = bincode::serialize(&msg).unwrap();
        let decoded: SyncMessage = bincode::deserialize(&encoded).unwrap();

        let SyncMessage::JoinRequest {
            join_type,
            last_known_clock,
            ..
        } = decoded
        else {
            panic!("Expected JoinRequest");
        };
        assert!(matches!(join_type, JoinType::Rejoin { .. }));
        assert_eq!(last_known_clock, Some(clock));
    }

    #[test]
    fn test_missing_deltas_serialization() -> bincode::Result<()> {
        // MissingDeltas should round-trip with its payload intact.
        let entity_id = uuid::Uuid::new_v4();
        let delta = EntityDelta {
            entity_id,
            node_id: uuid::Uuid::new_v4(),
            vector_clock: VectorClock::new(),
            operations: vec![],
        };

        let msg = SyncMessage::MissingDeltas {
            deltas: vec![delta],
        };

        let encoded = bincode::serialize(&msg)?;
        let decoded: SyncMessage = bincode::deserialize(&encoded)?;

        let SyncMessage::MissingDeltas { deltas } = decoded else {
            panic!("Expected MissingDeltas");
        };
        assert_eq!(deltas.len(), 1);
        assert_eq!(deltas[0].entity_id, entity_id);

        Ok(())
    }
}
|
||||
139
crates/libmarathon/src/networking/mod.rs
Normal file
139
crates/libmarathon/src/networking/mod.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
//! CRDT-based networking layer for distributed synchronization
|
||||
//!
|
||||
//! This module implements the networking strategy defined in RFC 0001.
|
||||
//! It provides CRDT-based synchronization over iroh-gossip with support for:
|
||||
//!
|
||||
//! - **Entity Synchronization** - Automatic sync of NetworkedEntity components
|
||||
//! - **CRDT Merge Semantics** - LWW, OR-Set, and Sequence CRDTs
|
||||
//! - **Large Blob Support** - Files >64KB via iroh-blobs
|
||||
//! - **Join Protocol** - New peers receive full world state
|
||||
//! - **Anti-Entropy** - Periodic sync to repair network partitions
|
||||
//! - **Vector Clock** - Causality tracking for distributed operations
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```
|
||||
//! use libmarathon::networking::*;
|
||||
//! use uuid::Uuid;
|
||||
//!
|
||||
//! // Create a vector clock and track operations
|
||||
//! let node_id = Uuid::new_v4();
|
||||
//! let mut clock = VectorClock::new();
|
||||
//!
|
||||
//! // Increment the clock for local operations
|
||||
//! clock.increment(node_id);
|
||||
//!
|
||||
//! // Build a component operation
|
||||
//! let builder = ComponentOpBuilder::new(node_id, clock.clone());
|
||||
//! let op = builder.set(
|
||||
//! "Transform".to_string(),
|
||||
//! ComponentData::Inline(vec![1, 2, 3]),
|
||||
//! );
|
||||
//! ```
|
||||
|
||||
mod apply_ops;
|
||||
mod auth;
|
||||
mod blob_support;
|
||||
mod change_detection;
|
||||
mod components;
|
||||
mod delta_generation;
|
||||
mod entity_map;
|
||||
mod error;
|
||||
mod gossip_bridge;
|
||||
mod join_protocol;
|
||||
mod locks;
|
||||
mod merge;
|
||||
mod message_dispatcher;
|
||||
mod messages;
|
||||
mod operation_builder;
|
||||
mod operation_log;
|
||||
mod operations;
|
||||
mod orset;
|
||||
mod plugin;
|
||||
mod rga;
|
||||
mod session;
|
||||
mod session_lifecycle;
|
||||
mod sync_component;
|
||||
mod tombstones;
|
||||
mod vector_clock;
|
||||
|
||||
pub use apply_ops::*;
|
||||
pub use auth::*;
|
||||
pub use blob_support::*;
|
||||
pub use change_detection::*;
|
||||
pub use components::*;
|
||||
pub use delta_generation::*;
|
||||
pub use entity_map::*;
|
||||
pub use error::*;
|
||||
pub use gossip_bridge::*;
|
||||
pub use join_protocol::*;
|
||||
pub use locks::*;
|
||||
pub use merge::*;
|
||||
pub use message_dispatcher::*;
|
||||
pub use messages::*;
|
||||
pub use operation_builder::*;
|
||||
pub use operation_log::*;
|
||||
pub use operations::*;
|
||||
pub use orset::*;
|
||||
pub use plugin::*;
|
||||
pub use rga::*;
|
||||
pub use session::*;
|
||||
pub use session_lifecycle::*;
|
||||
pub use sync_component::*;
|
||||
pub use tombstones::*;
|
||||
pub use vector_clock::*;
|
||||
|
||||
/// Spawn a networked entity with persistence enabled
|
||||
///
|
||||
/// Creates an entity with both NetworkedEntity and Persisted components,
|
||||
/// registers it in the NetworkEntityMap, and returns the entity ID.
|
||||
/// This is the single source of truth for creating networked entities
|
||||
/// that need to be synchronized and persisted across the network.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `world`: Bevy world to spawn entity in
|
||||
/// - `entity_id`: Network ID for the entity (UUID)
|
||||
/// - `node_id`: ID of the node that owns this entity
|
||||
///
|
||||
/// # Returns
|
||||
/// The spawned Bevy entity's ID
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::spawn_networked_entity;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// fn my_system(world: &mut World) {
|
||||
/// let entity_id = Uuid::new_v4();
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let entity = spawn_networked_entity(world, entity_id, node_id);
|
||||
/// // Entity is now registered and ready for sync/persistence
|
||||
/// }
|
||||
/// ```
|
||||
pub fn spawn_networked_entity(
|
||||
world: &mut bevy::prelude::World,
|
||||
entity_id: uuid::Uuid,
|
||||
node_id: uuid::Uuid,
|
||||
) -> bevy::prelude::Entity {
|
||||
use bevy::prelude::*;
|
||||
|
||||
// Spawn with both NetworkedEntity and Persisted components
|
||||
let entity = world
|
||||
.spawn((
|
||||
NetworkedEntity::with_id(entity_id, node_id),
|
||||
crate::persistence::Persisted::with_id(entity_id),
|
||||
))
|
||||
.id();
|
||||
|
||||
// Register in entity map
|
||||
let mut entity_map = world.resource_mut::<NetworkEntityMap>();
|
||||
entity_map.insert(entity_id, entity);
|
||||
|
||||
info!(
|
||||
"Spawned new networked entity {:?} from node {}",
|
||||
entity_id, node_id
|
||||
);
|
||||
|
||||
entity
|
||||
}
|
||||
273
crates/libmarathon/src/networking/operation_builder.rs
Normal file
273
crates/libmarathon/src/networking/operation_builder.rs
Normal file
@@ -0,0 +1,273 @@
|
||||
//! Build CRDT operations from ECS component changes
|
||||
//!
|
||||
//! This module provides utilities to convert Bevy component changes into
|
||||
//! ComponentOp operations that can be synchronized across the network.
|
||||
|
||||
use bevy::{
|
||||
prelude::*,
|
||||
reflect::TypeRegistry,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
networking::{
|
||||
blob_support::{
|
||||
BlobStore,
|
||||
create_component_data,
|
||||
},
|
||||
error::Result,
|
||||
messages::ComponentData,
|
||||
operations::{
|
||||
ComponentOp,
|
||||
ComponentOpBuilder,
|
||||
},
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
},
|
||||
persistence::reflection::serialize_component_typed,
|
||||
};
|
||||
|
||||
/// Build a Set operation (LWW) from a component
|
||||
///
|
||||
/// Serializes the component using Bevy's reflection system and creates a
|
||||
/// ComponentOp::Set for Last-Write-Wins synchronization. Automatically uses
|
||||
/// blob storage for components >64KB.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `component`: The component to serialize
|
||||
/// - `component_type`: Type path string
|
||||
/// - `node_id`: Our node ID
|
||||
/// - `vector_clock`: Current vector clock
|
||||
/// - `type_registry`: Bevy's type registry
|
||||
/// - `blob_store`: Optional blob store for large components
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A ComponentOp::Set ready to be broadcast
|
||||
pub fn build_set_operation(
|
||||
component: &dyn Reflect,
|
||||
component_type: String,
|
||||
node_id: NodeId,
|
||||
vector_clock: VectorClock,
|
||||
type_registry: &TypeRegistry,
|
||||
blob_store: Option<&BlobStore>,
|
||||
) -> Result<ComponentOp> {
|
||||
// Serialize the component
|
||||
let serialized = serialize_component_typed(component, type_registry)?;
|
||||
|
||||
// Create component data (inline or blob)
|
||||
let data = if let Some(store) = blob_store {
|
||||
create_component_data(serialized, store)?
|
||||
} else {
|
||||
ComponentData::Inline(serialized)
|
||||
};
|
||||
|
||||
// Build the operation
|
||||
let builder = ComponentOpBuilder::new(node_id, vector_clock);
|
||||
Ok(builder.set(component_type, data))
|
||||
}
|
||||
|
||||
/// Build Set operations for all components on an entity
|
||||
///
|
||||
/// This iterates over all components with reflection data and creates Set
|
||||
/// operations for each one. Automatically uses blob storage for large
|
||||
/// components.
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// - `entity`: The entity to serialize
|
||||
/// - `world`: Bevy world
|
||||
/// - `node_id`: Our node ID
|
||||
/// - `vector_clock`: Current vector clock
|
||||
/// - `type_registry`: Bevy's type registry
|
||||
/// - `blob_store`: Optional blob store for large components
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// Vector of ComponentOp::Set operations, one per component
|
||||
pub fn build_entity_operations(
|
||||
entity: Entity,
|
||||
world: &World,
|
||||
node_id: NodeId,
|
||||
vector_clock: VectorClock,
|
||||
type_registry: &TypeRegistry,
|
||||
blob_store: Option<&BlobStore>,
|
||||
) -> Vec<ComponentOp> {
|
||||
let mut operations = Vec::new();
|
||||
let entity_ref = world.entity(entity);
|
||||
|
||||
debug!(
|
||||
"build_entity_operations: Building operations for entity {:?}",
|
||||
entity
|
||||
);
|
||||
|
||||
// Iterate over all type registrations
|
||||
for registration in type_registry.iter() {
|
||||
// Skip if no ReflectComponent data
|
||||
let Some(reflect_component) = registration.data::<ReflectComponent>() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Get the type path
|
||||
let type_path = registration.type_info().type_path();
|
||||
|
||||
// Skip certain components
|
||||
if type_path.ends_with("::NetworkedEntity") ||
|
||||
type_path.ends_with("::NetworkedTransform") ||
|
||||
type_path.ends_with("::NetworkedSelection") ||
|
||||
type_path.ends_with("::NetworkedDrawingPath")
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// Try to reflect this component from the entity
|
||||
if let Some(reflected) = reflect_component.reflect(entity_ref) {
|
||||
// Serialize the component
|
||||
if let Ok(serialized) = serialize_component_typed(reflected, type_registry) {
|
||||
// Create component data (inline or blob)
|
||||
let data = if let Some(store) = blob_store {
|
||||
if let Ok(component_data) = create_component_data(serialized, store) {
|
||||
component_data
|
||||
} else {
|
||||
continue; // Skip this component if blob storage fails
|
||||
}
|
||||
} else {
|
||||
ComponentData::Inline(serialized)
|
||||
};
|
||||
|
||||
// Build the operation
|
||||
let mut clock = vector_clock.clone();
|
||||
clock.increment(node_id);
|
||||
|
||||
operations.push(ComponentOp::Set {
|
||||
component_type: type_path.to_string(),
|
||||
data,
|
||||
vector_clock: clock.clone(),
|
||||
});
|
||||
|
||||
debug!(" ✓ Added Set operation for {}", type_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug!(
|
||||
"build_entity_operations: Built {} operations for entity {:?}",
|
||||
operations.len(),
|
||||
entity
|
||||
);
|
||||
operations
|
||||
}
|
||||
|
||||
/// Build a Set operation for Transform component specifically
|
||||
///
|
||||
/// This is a helper for the common case of synchronizing Transform changes.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// VectorClock,
|
||||
/// build_transform_operation,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// # fn example(transform: &Transform, type_registry: &bevy::reflect::TypeRegistry) {
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let clock = VectorClock::new();
|
||||
///
|
||||
/// let op = build_transform_operation(transform, node_id, clock, type_registry, None).unwrap();
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn build_transform_operation(
|
||||
transform: &Transform,
|
||||
node_id: NodeId,
|
||||
vector_clock: VectorClock,
|
||||
type_registry: &TypeRegistry,
|
||||
blob_store: Option<&BlobStore>,
|
||||
) -> Result<ComponentOp> {
|
||||
// Use reflection to serialize Transform
|
||||
let serialized = serialize_component_typed(transform.as_reflect(), type_registry)?;
|
||||
|
||||
// Create component data (inline or blob)
|
||||
let data = if let Some(store) = blob_store {
|
||||
create_component_data(serialized, store)?
|
||||
} else {
|
||||
ComponentData::Inline(serialized)
|
||||
};
|
||||
|
||||
let builder = ComponentOpBuilder::new(node_id, vector_clock);
|
||||
Ok(builder.set(
|
||||
"bevy_transform::components::transform::Transform".to_string(),
|
||||
data,
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Registry preloaded with `Transform`, shared by every test.
    fn registry_with_transform() -> TypeRegistry {
        let mut registry = TypeRegistry::new();
        registry.register::<Transform>();
        registry
    }

    #[test]
    fn test_build_transform_operation() {
        let registry = registry_with_transform();
        let node = uuid::Uuid::new_v4();

        let op = build_transform_operation(
            &Transform::default(),
            node,
            VectorClock::new(),
            &registry,
            None,
        )
        .unwrap();

        assert!(op.is_set());
        assert_eq!(
            op.component_type(),
            Some("bevy_transform::components::transform::Transform")
        );
        // A fresh clock is ticked exactly once for our node.
        assert_eq!(op.vector_clock().get(node), 1);
    }

    #[test]
    fn test_build_entity_operations() {
        let registry = registry_with_transform();
        let mut world = World::new();

        // Spawn an entity carrying only a Transform.
        let entity = world.spawn(Transform::from_xyz(1.0, 2.0, 3.0)).id();

        let ops = build_entity_operations(
            entity,
            &world,
            uuid::Uuid::new_v4(),
            VectorClock::new(),
            &registry,
            None,
        );

        // At minimum the Transform component yields an operation.
        assert!(!ops.is_empty());
        assert!(ops.iter().all(|op| op.is_set()));
    }

    #[test]
    fn test_vector_clock_increment() {
        let registry = registry_with_transform();
        let node = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();
        let transform = Transform::default();

        let first =
            build_transform_operation(&transform, node, clock.clone(), &registry, None).unwrap();
        assert_eq!(first.vector_clock().get(node), 1);

        clock.increment(node);
        let second =
            build_transform_operation(&transform, node, clock.clone(), &registry, None).unwrap();
        assert_eq!(second.vector_clock().get(node), 2);
    }
}
|
||||
514
crates/libmarathon/src/networking/operation_log.rs
Normal file
514
crates/libmarathon/src/networking/operation_log.rs
Normal file
@@ -0,0 +1,514 @@
|
||||
//! Operation log for anti-entropy and partition recovery
|
||||
//!
|
||||
//! This module maintains a bounded log of recent operations for each entity,
|
||||
//! enabling peers to request missing deltas after network partitions or when
|
||||
//! they join late.
|
||||
//!
|
||||
//! The operation log:
|
||||
//! - Stores EntityDelta messages for recent operations
|
||||
//! - Bounded by time (keep operations from last N minutes) or size (max M ops)
|
||||
//! - Allows peers to request operations newer than their vector clock
|
||||
//! - Supports periodic anti-entropy sync to repair partitions
|
||||
|
||||
use std::collections::{
|
||||
HashMap,
|
||||
VecDeque,
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
GossipBridge,
|
||||
NodeVectorClock,
|
||||
messages::{
|
||||
EntityDelta,
|
||||
SyncMessage,
|
||||
VersionedMessage,
|
||||
},
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
};
|
||||
|
||||
/// Maximum operations to keep per entity (prevents unbounded growth)
|
||||
const MAX_OPS_PER_ENTITY: usize = 100;
|
||||
|
||||
/// Maximum age for operations (in seconds)
|
||||
const MAX_OP_AGE_SECS: u64 = 300; // 5 minutes
|
||||
|
||||
/// Maximum number of entities to track (prevents unbounded growth)
|
||||
const MAX_ENTITIES: usize = 10_000;
|
||||
|
||||
/// Operation log entry with timestamp
|
||||
#[derive(Debug, Clone)]
|
||||
struct LogEntry {
|
||||
/// The entity delta operation
|
||||
delta: EntityDelta,
|
||||
|
||||
/// When this operation was created (for pruning old ops)
|
||||
timestamp: std::time::Instant,
|
||||
}
|
||||
|
||||
/// Resource storing the operation log for all entities
|
||||
///
|
||||
/// This is used for anti-entropy - peers can request operations they're missing
|
||||
/// by comparing vector clocks.
|
||||
///
|
||||
/// # Bounded Growth
|
||||
///
|
||||
/// The operation log is bounded in three ways:
|
||||
/// - Max operations per entity: `MAX_OPS_PER_ENTITY` (100)
|
||||
/// - Max operation age: `MAX_OP_AGE_SECS` (300 seconds / 5 minutes)
|
||||
/// - Max entities: `MAX_ENTITIES` (10,000)
|
||||
///
|
||||
/// When limits are exceeded, oldest operations/entities are pruned
|
||||
/// automatically.
|
||||
#[derive(Resource)]
|
||||
pub struct OperationLog {
|
||||
/// Map from entity ID to list of recent operations
|
||||
logs: HashMap<uuid::Uuid, VecDeque<LogEntry>>,
|
||||
|
||||
/// Total number of operations across all entities (for monitoring)
|
||||
total_ops: usize,
|
||||
}
|
||||
|
||||
impl OperationLog {
|
||||
/// Create a new operation log
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
logs: HashMap::new(),
|
||||
total_ops: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Record an operation in the log
|
||||
///
|
||||
/// This should be called whenever we generate or apply an EntityDelta.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{
|
||||
/// EntityDelta,
|
||||
/// OperationLog,
|
||||
/// VectorClock,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let mut log = OperationLog::new();
|
||||
/// let entity_id = Uuid::new_v4();
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let clock = VectorClock::new();
|
||||
///
|
||||
/// let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
|
||||
/// log.record_operation(delta);
|
||||
/// ```
|
||||
pub fn record_operation(&mut self, delta: EntityDelta) {
|
||||
// Check if we're at the entity limit
|
||||
if self.logs.len() >= MAX_ENTITIES && !self.logs.contains_key(&delta.entity_id) {
|
||||
// Prune oldest entity (by finding entity with oldest operation)
|
||||
if let Some(oldest_entity_id) = self.find_oldest_entity() {
|
||||
warn!(
|
||||
"Operation log at entity limit ({}), pruning oldest entity {:?}",
|
||||
MAX_ENTITIES, oldest_entity_id
|
||||
);
|
||||
if let Some(removed_log) = self.logs.remove(&oldest_entity_id) {
|
||||
self.total_ops = self.total_ops.saturating_sub(removed_log.len());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let entry = LogEntry {
|
||||
delta: delta.clone(),
|
||||
timestamp: std::time::Instant::now(),
|
||||
};
|
||||
|
||||
let log = self
|
||||
.logs
|
||||
.entry(delta.entity_id)
|
||||
.or_insert_with(VecDeque::new);
|
||||
log.push_back(entry);
|
||||
self.total_ops += 1;
|
||||
|
||||
// Prune if we exceed max ops per entity
|
||||
while log.len() > MAX_OPS_PER_ENTITY {
|
||||
log.pop_front();
|
||||
self.total_ops = self.total_ops.saturating_sub(1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Find the entity with the oldest operation (for LRU eviction)
|
||||
fn find_oldest_entity(&self) -> Option<uuid::Uuid> {
|
||||
self.logs
|
||||
.iter()
|
||||
.filter_map(|(entity_id, log)| log.front().map(|entry| (*entity_id, entry.timestamp)))
|
||||
.min_by_key(|(_, timestamp)| *timestamp)
|
||||
.map(|(entity_id, _)| entity_id)
|
||||
}
|
||||
|
||||
/// Get operations for an entity that are newer than a given vector clock
|
||||
///
|
||||
/// This is used to respond to SyncRequest messages.
|
||||
pub fn get_operations_newer_than(
|
||||
&self,
|
||||
entity_id: uuid::Uuid,
|
||||
their_clock: &VectorClock,
|
||||
) -> Vec<EntityDelta> {
|
||||
let Some(log) = self.logs.get(&entity_id) else {
|
||||
return vec![];
|
||||
};
|
||||
|
||||
log.iter()
|
||||
.filter(|entry| {
|
||||
// Include operation if they haven't seen it yet
|
||||
// (their clock happened before the operation's clock)
|
||||
their_clock.happened_before(&entry.delta.vector_clock)
|
||||
})
|
||||
.map(|entry| entry.delta.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get all operations newer than a vector clock across all entities
|
||||
///
|
||||
/// This is used to respond to SyncRequest for the entire world state.
|
||||
pub fn get_all_operations_newer_than(&self, their_clock: &VectorClock) -> Vec<EntityDelta> {
|
||||
let mut deltas = Vec::new();
|
||||
|
||||
for (entity_id, _log) in &self.logs {
|
||||
let entity_deltas = self.get_operations_newer_than(*entity_id, their_clock);
|
||||
deltas.extend(entity_deltas);
|
||||
}
|
||||
|
||||
deltas
|
||||
}
|
||||
|
||||
/// Prune old operations from the log
|
||||
///
|
||||
/// This should be called periodically to prevent unbounded growth.
|
||||
/// Removes operations older than MAX_OP_AGE_SECS.
|
||||
pub fn prune_old_operations(&mut self) {
|
||||
let max_age = std::time::Duration::from_secs(MAX_OP_AGE_SECS);
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
let mut pruned_count = 0;
|
||||
|
||||
for log in self.logs.values_mut() {
|
||||
let before_len = log.len();
|
||||
log.retain(|entry| now.duration_since(entry.timestamp) < max_age);
|
||||
pruned_count += before_len - log.len();
|
||||
}
|
||||
|
||||
// Update total_ops counter
|
||||
self.total_ops = self.total_ops.saturating_sub(pruned_count);
|
||||
|
||||
// Remove empty logs
|
||||
self.logs.retain(|_, log| !log.is_empty());
|
||||
}
|
||||
|
||||
/// Get the number of operations in the log
|
||||
pub fn total_operations(&self) -> usize {
|
||||
self.total_ops
|
||||
}
|
||||
|
||||
/// Get the number of entities with logged operations
|
||||
pub fn num_entities(&self) -> usize {
|
||||
self.logs.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for OperationLog {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a SyncRequest message
|
||||
///
|
||||
/// This asks peers to send us any operations we're missing.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::{
|
||||
/// VectorClock,
|
||||
/// build_sync_request,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node_id = Uuid::new_v4();
|
||||
/// let clock = VectorClock::new();
|
||||
/// let request = build_sync_request(node_id, clock);
|
||||
/// ```
|
||||
pub fn build_sync_request(node_id: NodeId, vector_clock: VectorClock) -> VersionedMessage {
|
||||
VersionedMessage::new(SyncMessage::SyncRequest {
|
||||
node_id,
|
||||
vector_clock,
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a MissingDeltas response
|
||||
///
|
||||
/// This contains operations that the requesting peer is missing.
|
||||
pub fn build_missing_deltas(deltas: Vec<EntityDelta>) -> VersionedMessage {
|
||||
VersionedMessage::new(SyncMessage::MissingDeltas { deltas })
|
||||
}
|
||||
|
||||
/// System to handle SyncRequest messages
|
||||
///
|
||||
/// When we receive a SyncRequest, compare vector clocks and send any
|
||||
/// operations the peer is missing.
|
||||
///
|
||||
/// Add this to your app:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::handle_sync_requests_system;
|
||||
///
|
||||
/// App::new().add_systems(Update, handle_sync_requests_system);
|
||||
/// ```
|
||||
pub fn handle_sync_requests_system(
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
operation_log: Res<OperationLog>,
|
||||
) {
|
||||
let Some(bridge) = bridge else {
|
||||
return;
|
||||
};
|
||||
|
||||
// Poll for SyncRequest messages
|
||||
while let Some(message) = bridge.try_recv() {
|
||||
match message.message {
|
||||
| SyncMessage::SyncRequest {
|
||||
node_id: requesting_node,
|
||||
vector_clock: their_clock,
|
||||
} => {
|
||||
debug!("Received SyncRequest from node {}", requesting_node);
|
||||
|
||||
// Find operations they're missing
|
||||
let missing_deltas = operation_log.get_all_operations_newer_than(&their_clock);
|
||||
|
||||
if !missing_deltas.is_empty() {
|
||||
info!(
|
||||
"Sending {} missing deltas to node {}",
|
||||
missing_deltas.len(),
|
||||
requesting_node
|
||||
);
|
||||
|
||||
// Send MissingDeltas response
|
||||
let response = build_missing_deltas(missing_deltas);
|
||||
if let Err(e) = bridge.send(response) {
|
||||
error!("Failed to send MissingDeltas: {}", e);
|
||||
}
|
||||
} else {
|
||||
debug!("No missing deltas for node {}", requesting_node);
|
||||
}
|
||||
},
|
||||
| _ => {
|
||||
// Not a SyncRequest, ignore
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to handle MissingDeltas messages
|
||||
///
|
||||
/// When we receive MissingDeltas (in response to our SyncRequest), apply them.
|
||||
pub fn handle_missing_deltas_system(world: &mut World) {
|
||||
// Check if bridge exists
|
||||
if world.get_resource::<GossipBridge>().is_none() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Clone the bridge to avoid borrowing issues
|
||||
let bridge = world.resource::<GossipBridge>().clone();
|
||||
|
||||
// Poll for MissingDeltas messages
|
||||
while let Some(message) = bridge.try_recv() {
|
||||
match message.message {
|
||||
| SyncMessage::MissingDeltas { deltas } => {
|
||||
info!("Received MissingDeltas with {} operations", deltas.len());
|
||||
|
||||
// Apply each delta
|
||||
for delta in deltas {
|
||||
debug!("Applying missing delta for entity {:?}", delta.entity_id);
|
||||
|
||||
crate::networking::apply_entity_delta(&delta, world);
|
||||
}
|
||||
},
|
||||
| _ => {
|
||||
// Not MissingDeltas, ignore
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to periodically send SyncRequest for anti-entropy
|
||||
///
|
||||
/// This runs every N seconds to request any operations we might be missing,
|
||||
/// helping to repair network partitions.
|
||||
///
|
||||
/// **NOTE:** This is a simple timer-based implementation. Phase 14 will add
|
||||
/// adaptive sync intervals based on network conditions.
|
||||
pub fn periodic_sync_system(
|
||||
bridge: Option<Res<GossipBridge>>,
|
||||
node_clock: Res<NodeVectorClock>,
|
||||
time: Res<Time>,
|
||||
mut last_sync: Local<f32>,
|
||||
) {
|
||||
let Some(bridge) = bridge else {
|
||||
return;
|
||||
};
|
||||
|
||||
// Sync every 10 seconds
|
||||
const SYNC_INTERVAL: f32 = 10.0;
|
||||
|
||||
*last_sync += time.delta_secs();
|
||||
|
||||
if *last_sync >= SYNC_INTERVAL {
|
||||
*last_sync = 0.0;
|
||||
|
||||
debug!("Sending periodic SyncRequest for anti-entropy");
|
||||
|
||||
let request = build_sync_request(node_clock.node_id, node_clock.clock.clone());
|
||||
if let Err(e) = bridge.send(request) {
|
||||
error!("Failed to send SyncRequest: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to prune old operations from the log
|
||||
///
|
||||
/// This runs periodically to remove operations older than MAX_OP_AGE_SECS.
|
||||
pub fn prune_operation_log_system(
|
||||
mut operation_log: ResMut<OperationLog>,
|
||||
time: Res<Time>,
|
||||
mut last_prune: Local<f32>,
|
||||
) {
|
||||
// Prune every 60 seconds
|
||||
const PRUNE_INTERVAL: f32 = 60.0;
|
||||
|
||||
*last_prune += time.delta_secs();
|
||||
|
||||
if *last_prune >= PRUNE_INTERVAL {
|
||||
*last_prune = 0.0;
|
||||
|
||||
let before = operation_log.total_operations();
|
||||
operation_log.prune_old_operations();
|
||||
let after = operation_log.total_operations();
|
||||
|
||||
if before != after {
|
||||
debug!("Pruned operation log: {} ops -> {} ops", before, after);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh OperationLog tracks no entities and no operations.
    #[test]
    fn test_operation_log_creation() {
        let log = OperationLog::new();
        assert_eq!(log.num_entities(), 0);
        assert_eq!(log.total_operations(), 0);
    }

    // Recording a delta registers both the entity and the operation.
    #[test]
    fn test_record_operation() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
        log.record_operation(delta);

        assert_eq!(log.num_entities(), 1);
        assert_eq!(log.total_operations(), 1);
    }

    // Only operations whose clock is newer than the query clock are returned.
    #[test]
    fn test_get_operations_newer_than() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create two operations with different clocks
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);

        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);

        let delta1 = EntityDelta::new(entity_id, node_id, clock1.clone(), vec![]);
        let delta2 = EntityDelta::new(entity_id, node_id, clock2.clone(), vec![]);

        log.record_operation(delta1);
        log.record_operation(delta2);

        // Request with clock1 should get delta2
        let newer = log.get_operations_newer_than(entity_id, &clock1);
        assert_eq!(newer.len(), 1);
        assert_eq!(newer[0].vector_clock, clock2);

        // Request with clock2 should get nothing
        let newer = log.get_operations_newer_than(entity_id, &clock2);
        assert_eq!(newer.len(), 0);
    }

    // Per-entity history is capped so the log cannot grow without bound.
    #[test]
    fn test_max_ops_per_entity() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Add more than MAX_OPS_PER_ENTITY operations
        for _ in 0..(MAX_OPS_PER_ENTITY + 10) {
            let mut clock = VectorClock::new();
            clock.increment(node_id);
            let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
            log.record_operation(delta);
        }

        // Should be capped at MAX_OPS_PER_ENTITY
        assert_eq!(log.total_operations(), MAX_OPS_PER_ENTITY);
    }

    // build_sync_request embeds the caller's node id and clock unchanged.
    #[test]
    fn test_build_sync_request() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let request = build_sync_request(node_id, clock.clone());

        match request.message {
            | SyncMessage::SyncRequest {
                node_id: req_node_id,
                vector_clock,
            } => {
                assert_eq!(req_node_id, node_id);
                assert_eq!(vector_clock, clock);
            },
            | _ => panic!("Expected SyncRequest"),
        }
    }

    // build_missing_deltas wraps the deltas in a MissingDeltas message.
    #[test]
    fn test_build_missing_deltas() {
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
        let response = build_missing_deltas(vec![delta.clone()]);

        match response.message {
            | SyncMessage::MissingDeltas { deltas } => {
                assert_eq!(deltas.len(), 1);
                assert_eq!(deltas[0].entity_id, entity_id);
            },
            | _ => panic!("Expected MissingDeltas"),
        }
    }
}
|
||||
399
crates/libmarathon/src/networking/operations.rs
Normal file
399
crates/libmarathon/src/networking/operations.rs
Normal file
@@ -0,0 +1,399 @@
|
||||
//! CRDT operations for component synchronization
|
||||
//!
|
||||
//! This module defines the different types of operations that can be performed
|
||||
//! on components in the distributed system. Each operation type corresponds to
|
||||
//! a specific CRDT merge strategy.
|
||||
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::{
|
||||
messages::ComponentData,
|
||||
vector_clock::VectorClock,
|
||||
};
|
||||
|
||||
/// Component operations for CRDT synchronization
///
/// Different operation types support different CRDT semantics:
///
/// - **Set** - Last-Write-Wins (LWW) using vector clocks
/// - **SetAdd/SetRemove** - OR-Set for concurrent add/remove
/// - **SequenceInsert/SequenceDelete** - RGA for ordered sequences
/// - **Delete** - Entity deletion with tombstone
///
/// # CRDT Merge Semantics
///
/// ## Last-Write-Wins (Set)
/// - Use vector clock to determine which operation happened later
/// - If concurrent, use node ID as tiebreaker
/// - Example: Transform component position changes
///
/// ## OR-Set (SetAdd/SetRemove)
/// - Add wins over remove when concurrent
/// - Uses unique operation IDs to track add/remove pairs
/// - Example: Selection of multiple entities, tags
///
/// ## Sequence CRDT (SequenceInsert/SequenceDelete)
/// - Maintains ordering across concurrent inserts
/// - Uses RGA (Replicated Growable Array) algorithm
/// - Example: Collaborative drawing paths
///
/// NOTE(review): this enum is serialized with serde (bincode in the tests
/// below), and bincode's default encoding identifies variants by index —
/// confirm that reordering or inserting variants is treated as a
/// wire-format break.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComponentOp {
    /// Set a component value (Last-Write-Wins)
    ///
    /// Used for components where the latest value should win. The vector clock
    /// determines which operation is "later". If operations are concurrent,
    /// the node ID is used as a tiebreaker for deterministic results.
    ///
    /// The data field can be either inline (for small components) or a blob
    /// reference (for components >64KB).
    Set {
        /// Type path of the component
        component_type: String,

        /// Component data (inline or blob reference)
        data: ComponentData,

        /// Vector clock when this set operation was created
        vector_clock: VectorClock,
    },

    /// Add an element to an OR-Set
    ///
    /// Adds an element to a set that supports concurrent add/remove. Each add
    /// has a unique ID so that removes can reference specific adds.
    SetAdd {
        /// Type path of the component
        component_type: String,

        /// Unique ID for this add operation
        operation_id: uuid::Uuid,

        /// Element being added (serialized)
        element: Vec<u8>,

        /// Vector clock when this add was created
        vector_clock: VectorClock,
    },

    /// Remove an element from an OR-Set
    ///
    /// Removes an element by referencing the add operation IDs that added it.
    /// If concurrent with an add, the add wins (observed-remove semantics).
    SetRemove {
        /// Type path of the component
        component_type: String,

        /// IDs of the add operations being removed
        removed_ids: Vec<uuid::Uuid>,

        /// Vector clock when this remove was created
        vector_clock: VectorClock,
    },

    /// Insert an element into a sequence (RGA)
    ///
    /// Inserts an element after a specific position in a sequence. Uses RGA
    /// (Replicated Growable Array) to maintain consistent ordering across
    /// concurrent inserts.
    SequenceInsert {
        /// Type path of the component
        component_type: String,

        /// Unique ID for this insert operation
        operation_id: uuid::Uuid,

        /// ID of the element to insert after (None = beginning)
        after_id: Option<uuid::Uuid>,

        /// Element being inserted (serialized)
        element: Vec<u8>,

        /// Vector clock when this insert was created
        vector_clock: VectorClock,
    },

    /// Delete an element from a sequence (RGA)
    ///
    /// Marks an element as deleted in the sequence. The element remains in the
    /// structure (tombstone) to preserve ordering for concurrent operations.
    SequenceDelete {
        /// Type path of the component
        component_type: String,

        /// ID of the element to delete
        element_id: uuid::Uuid,

        /// Vector clock when this delete was created
        vector_clock: VectorClock,
    },

    /// Delete an entire entity
    ///
    /// Marks an entity as deleted (tombstone). The entity remains in the
    /// system to prevent resurrection if old operations arrive.
    Delete {
        /// Vector clock when this delete was created
        vector_clock: VectorClock,
    },
}
|
||||
|
||||
impl ComponentOp {
|
||||
/// Get the component type for this operation
|
||||
pub fn component_type(&self) -> Option<&str> {
|
||||
match self {
|
||||
| ComponentOp::Set { component_type, .. } |
|
||||
ComponentOp::SetAdd { component_type, .. } |
|
||||
ComponentOp::SetRemove { component_type, .. } |
|
||||
ComponentOp::SequenceInsert { component_type, .. } |
|
||||
ComponentOp::SequenceDelete { component_type, .. } => Some(component_type),
|
||||
| ComponentOp::Delete { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the vector clock for this operation
|
||||
pub fn vector_clock(&self) -> &VectorClock {
|
||||
match self {
|
||||
| ComponentOp::Set { vector_clock, .. } |
|
||||
ComponentOp::SetAdd { vector_clock, .. } |
|
||||
ComponentOp::SetRemove { vector_clock, .. } |
|
||||
ComponentOp::SequenceInsert { vector_clock, .. } |
|
||||
ComponentOp::SequenceDelete { vector_clock, .. } |
|
||||
ComponentOp::Delete { vector_clock } => vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this is a Set operation (LWW)
|
||||
pub fn is_set(&self) -> bool {
|
||||
matches!(self, ComponentOp::Set { .. })
|
||||
}
|
||||
|
||||
/// Check if this is an OR-Set operation
|
||||
pub fn is_or_set(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
ComponentOp::SetAdd { .. } | ComponentOp::SetRemove { .. }
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if this is a Sequence operation (RGA)
|
||||
pub fn is_sequence(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
ComponentOp::SequenceInsert { .. } | ComponentOp::SequenceDelete { .. }
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if this is a Delete operation
|
||||
pub fn is_delete(&self) -> bool {
|
||||
matches!(self, ComponentOp::Delete { .. })
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for creating ComponentOp instances
///
/// Provides a fluent API for constructing operations with proper vector clock
/// timestamps.
pub struct ComponentOpBuilder {
    /// Node performing the operations; its clock entry is incremented once
    /// per built operation.
    node_id: uuid::Uuid,

    /// Clock snapshot consumed (after one increment) by the built operation.
    vector_clock: VectorClock,
}
|
||||
|
||||
impl ComponentOpBuilder {
|
||||
/// Create a new operation builder
|
||||
pub fn new(node_id: uuid::Uuid, vector_clock: VectorClock) -> Self {
|
||||
Self {
|
||||
node_id,
|
||||
vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a Set operation (LWW)
|
||||
pub fn set(mut self, component_type: String, data: ComponentData) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::Set {
|
||||
component_type,
|
||||
data,
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a SetAdd operation (OR-Set)
|
||||
pub fn set_add(mut self, component_type: String, element: Vec<u8>) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::SetAdd {
|
||||
component_type,
|
||||
operation_id: uuid::Uuid::new_v4(),
|
||||
element,
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a SetRemove operation (OR-Set)
|
||||
pub fn set_remove(
|
||||
mut self,
|
||||
component_type: String,
|
||||
removed_ids: Vec<uuid::Uuid>,
|
||||
) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::SetRemove {
|
||||
component_type,
|
||||
removed_ids,
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a SequenceInsert operation (RGA)
|
||||
pub fn sequence_insert(
|
||||
mut self,
|
||||
component_type: String,
|
||||
after_id: Option<uuid::Uuid>,
|
||||
element: Vec<u8>,
|
||||
) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::SequenceInsert {
|
||||
component_type,
|
||||
operation_id: uuid::Uuid::new_v4(),
|
||||
after_id,
|
||||
element,
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a SequenceDelete operation (RGA)
|
||||
pub fn sequence_delete(
|
||||
mut self,
|
||||
component_type: String,
|
||||
element_id: uuid::Uuid,
|
||||
) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::SequenceDelete {
|
||||
component_type,
|
||||
element_id,
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a Delete operation
|
||||
pub fn delete(mut self) -> ComponentOp {
|
||||
self.vector_clock.increment(self.node_id);
|
||||
ComponentOp::Delete {
|
||||
vector_clock: self.vector_clock,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // component_type() exposes the targeted component's type path.
    #[test]
    fn test_component_type() {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        assert_eq!(op.component_type(), Some("Transform"));
    }

    // Entity-level deletes carry no component type.
    #[test]
    fn test_component_type_delete() {
        let op = ComponentOp::Delete {
            vector_clock: VectorClock::new(),
        };

        assert_eq!(op.component_type(), None);
    }

    // The is_* predicates are mutually exclusive: Set matches only is_set.
    #[test]
    fn test_is_set() {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        assert!(op.is_set());
        assert!(!op.is_or_set());
        assert!(!op.is_sequence());
        assert!(!op.is_delete());
    }

    // SetAdd matches only is_or_set.
    #[test]
    fn test_is_or_set() {
        let op = ComponentOp::SetAdd {
            component_type: "Selection".to_string(),
            operation_id: uuid::Uuid::new_v4(),
            element: vec![1, 2, 3],
            vector_clock: VectorClock::new(),
        };

        assert!(!op.is_set());
        assert!(op.is_or_set());
        assert!(!op.is_sequence());
        assert!(!op.is_delete());
    }

    // SequenceInsert matches only is_sequence.
    #[test]
    fn test_is_sequence() {
        let op = ComponentOp::SequenceInsert {
            component_type: "DrawingPath".to_string(),
            operation_id: uuid::Uuid::new_v4(),
            after_id: None,
            element: vec![1, 2, 3],
            vector_clock: VectorClock::new(),
        };

        assert!(!op.is_set());
        assert!(!op.is_or_set());
        assert!(op.is_sequence());
        assert!(!op.is_delete());
    }

    // The builder stamps exactly one clock increment for its node.
    #[test]
    fn test_builder_set() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let builder = ComponentOpBuilder::new(node_id, clock);
        let op = builder.set(
            "Transform".to_string(),
            ComponentData::Inline(vec![1, 2, 3]),
        );

        assert!(op.is_set());
        assert_eq!(op.vector_clock().get(node_id), 1);
    }

    // Same single-increment guarantee for OR-Set adds.
    #[test]
    fn test_builder_set_add() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let builder = ComponentOpBuilder::new(node_id, clock);
        let op = builder.set_add("Selection".to_string(), vec![1, 2, 3]);

        assert!(op.is_or_set());
        assert_eq!(op.vector_clock().get(node_id), 1);
    }

    // A bincode round-trip preserves the operation variant.
    #[test]
    fn test_serialization() -> bincode::Result<()> {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        let bytes = bincode::serialize(&op)?;
        let deserialized: ComponentOp = bincode::deserialize(&bytes)?;

        assert!(deserialized.is_set());

        Ok(())
    }
}
|
||||
490
crates/libmarathon/src/networking/orset.rs
Normal file
490
crates/libmarathon/src/networking/orset.rs
Normal file
@@ -0,0 +1,490 @@
|
||||
//! OR-Set (Observed-Remove Set) CRDT implementation
|
||||
//!
|
||||
//! This module provides a conflict-free replicated set that supports concurrent
|
||||
//! add and remove operations with "add-wins" semantics.
|
||||
//!
|
||||
//! ## OR-Set Semantics
|
||||
//!
|
||||
//! - **Add-wins**: If an element is concurrently added and removed, the add
|
||||
//! wins
|
||||
//! - **Observed-remove**: Removes only affect adds that have been observed
|
||||
//! (happened-before)
|
||||
//! - **Unique operation IDs**: Each add generates a unique ID to track
|
||||
//! add/remove pairs
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! ```
|
||||
//! use libmarathon::networking::{
|
||||
//! OrElement,
|
||||
//! OrSet,
|
||||
//! };
|
||||
//! use uuid::Uuid;
|
||||
//!
|
||||
//! let node1 = Uuid::new_v4();
|
||||
//! let node2 = Uuid::new_v4();
|
||||
//!
|
||||
//! // Node 1 adds "foo"
|
||||
//! let mut set1: OrSet<String> = OrSet::new();
|
||||
//! let (add_id, _) = set1.add("foo".to_string(), node1);
|
||||
//!
|
||||
//! // Node 2 concurrently adds "bar"
|
||||
//! let mut set2: OrSet<String> = OrSet::new();
|
||||
//! set2.add("bar".to_string(), node2);
|
||||
//!
|
||||
//! // Node 1 removes "foo" (observes own add)
|
||||
//! set1.remove(vec![add_id]);
|
||||
//!
|
||||
//! // Merge sets - "bar" should be present, "foo" should be removed
|
||||
//! set1.merge(&set2);
|
||||
//! assert_eq!(set1.len(), 1);
|
||||
//! assert!(set1.contains(&"bar".to_string()));
|
||||
//! assert!(!set1.contains(&"foo".to_string()));
|
||||
//! ```
|
||||
|
||||
use std::collections::{
|
||||
HashMap,
|
||||
HashSet,
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::vector_clock::NodeId;
|
||||
|
||||
/// An element in an OR-Set with its unique operation ID
///
/// Each add operation generates a unique ID. The same logical element can have
/// multiple IDs if it's added multiple times (e.g., removed then re-added).
///
/// NOTE(review): `OrSet` stores `(value, node)` tuples internally rather than
/// this type — confirm `OrElement` is still consumed by external callers.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct OrElement<T> {
    /// The actual element value
    pub value: T,

    /// Unique ID for this add operation
    pub operation_id: uuid::Uuid,

    /// Node that performed the add
    pub adding_node: NodeId,
}
|
||||
|
||||
/// OR-Set (Observed-Remove Set) CRDT
///
/// A replicated set supporting concurrent add/remove with add-wins semantics.
/// This is based on the "Optimized Observed-Remove Set" algorithm.
///
/// # Type Parameters
///
/// - `T`: The element type (must be Clone, Eq, Hash, Serialize, Deserialize)
///
/// # Internal Structure
///
/// - `elements`: Map from operation_id → (value, adding_node)
/// - `tombstones`: Set of removed operation IDs
///
/// An element is "present" if it has an operation ID in `elements` that's
/// not in `tombstones`.
///
/// Tombstones accumulate over time; `garbage_collect` reclaims the element
/// payloads but intentionally keeps the tombstone IDs themselves.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrSet<T> {
    /// Map from operation ID to (value, adding_node)
    elements: HashMap<uuid::Uuid, (T, NodeId)>,

    /// Set of removed operation IDs
    tombstones: HashSet<uuid::Uuid>,
}
|
||||
|
||||
impl<T> OrSet<T>
|
||||
where
|
||||
T: Clone + Eq + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
|
||||
{
|
||||
/// Create a new empty OR-Set
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
elements: HashMap::new(),
|
||||
tombstones: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add an element to the set
|
||||
///
|
||||
/// Returns (operation_id, was_new) where was_new indicates if this value
|
||||
/// wasn't already present.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::OrSet;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut set: OrSet<String> = OrSet::new();
|
||||
///
|
||||
/// let (id, was_new) = set.add("foo".to_string(), node);
|
||||
/// assert!(was_new);
|
||||
/// assert!(set.contains(&"foo".to_string()));
|
||||
/// ```
|
||||
pub fn add(&mut self, value: T, node_id: NodeId) -> (uuid::Uuid, bool) {
|
||||
let operation_id = uuid::Uuid::new_v4();
|
||||
let was_new = !self.contains(&value);
|
||||
|
||||
self.elements.insert(operation_id, (value, node_id));
|
||||
|
||||
(operation_id, was_new)
|
||||
}
|
||||
|
||||
/// Remove elements by their operation IDs
|
||||
///
|
||||
/// This implements observed-remove semantics: only the specific add
|
||||
/// operations identified by these IDs are removed.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::OrSet;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut set: OrSet<String> = OrSet::new();
|
||||
///
|
||||
/// let (id, _) = set.add("foo".to_string(), node);
|
||||
/// assert!(set.contains(&"foo".to_string()));
|
||||
///
|
||||
/// set.remove(vec![id]);
|
||||
/// assert!(!set.contains(&"foo".to_string()));
|
||||
/// ```
|
||||
pub fn remove(&mut self, operation_ids: Vec<uuid::Uuid>) {
|
||||
for id in operation_ids {
|
||||
self.tombstones.insert(id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a value is present in the set
|
||||
///
|
||||
/// A value is present if it has at least one operation ID that's not
|
||||
/// tombstoned.
|
||||
pub fn contains(&self, value: &T) -> bool {
|
||||
self.elements
|
||||
.iter()
|
||||
.any(|(id, (v, _))| v == value && !self.tombstones.contains(id))
|
||||
}
|
||||
|
||||
/// Get all present values
|
||||
///
|
||||
/// Returns an iterator over values that are currently in the set
|
||||
/// (not tombstoned).
|
||||
pub fn values(&self) -> impl Iterator<Item = &T> {
|
||||
self.elements
|
||||
.iter()
|
||||
.filter(|(id, _)| !self.tombstones.contains(id))
|
||||
.map(|(_, (value, _))| value)
|
||||
}
|
||||
|
||||
/// Get all operation IDs for a specific value
|
||||
///
|
||||
/// This is used when removing a value - we need to tombstone all its
|
||||
/// operation IDs.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::OrSet;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut set: OrSet<String> = OrSet::new();
|
||||
///
|
||||
/// set.add("foo".to_string(), node);
|
||||
/// set.add("foo".to_string(), node); // Add same value again
|
||||
///
|
||||
/// let ids = set.get_operation_ids(&"foo".to_string());
|
||||
/// assert_eq!(ids.len(), 2); // Two operation IDs for "foo"
|
||||
/// ```
|
||||
pub fn get_operation_ids(&self, value: &T) -> Vec<uuid::Uuid> {
|
||||
self.elements
|
||||
.iter()
|
||||
.filter(|(id, (v, _))| v == value && !self.tombstones.contains(id))
|
||||
.map(|(id, _)| *id)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get the number of distinct values in the set
|
||||
pub fn len(&self) -> usize {
|
||||
let mut seen = HashSet::new();
|
||||
self.elements
|
||||
.iter()
|
||||
.filter(|(id, (value, _))| !self.tombstones.contains(id) && seen.insert(value))
|
||||
.count()
|
||||
}
|
||||
|
||||
/// Check if the set is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
/// Merge another OR-Set into this one
|
||||
///
|
||||
/// This implements the CRDT merge operation:
|
||||
/// - Union all elements
|
||||
/// - Union all tombstones
|
||||
/// - Add-wins: elements not in tombstones are present
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::OrSet;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node1 = Uuid::new_v4();
|
||||
/// let node2 = Uuid::new_v4();
|
||||
///
|
||||
/// let mut set1: OrSet<String> = OrSet::new();
|
||||
/// set1.add("foo".to_string(), node1);
|
||||
///
|
||||
/// let mut set2: OrSet<String> = OrSet::new();
|
||||
/// set2.add("bar".to_string(), node2);
|
||||
///
|
||||
/// set1.merge(&set2);
|
||||
/// assert_eq!(set1.len(), 2);
|
||||
/// assert!(set1.contains(&"foo".to_string()));
|
||||
/// assert!(set1.contains(&"bar".to_string()));
|
||||
/// ```
|
||||
pub fn merge(&mut self, other: &OrSet<T>) {
|
||||
// Union elements
|
||||
for (id, (value, node)) in &other.elements {
|
||||
self.elements
|
||||
.entry(*id)
|
||||
.or_insert_with(|| (value.clone(), *node));
|
||||
}
|
||||
|
||||
// Union tombstones
|
||||
for id in &other.tombstones {
|
||||
self.tombstones.insert(*id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the set
|
||||
///
|
||||
/// Removes all elements and tombstones.
|
||||
pub fn clear(&mut self) {
|
||||
self.elements.clear();
|
||||
self.tombstones.clear();
|
||||
}
|
||||
|
||||
/// Garbage collect tombstoned elements
|
||||
///
|
||||
/// Removes elements that are tombstoned to save memory. This is safe
|
||||
/// because once an operation is tombstoned, it stays tombstoned.
|
||||
///
|
||||
/// This should be called periodically to prevent unbounded growth.
|
||||
pub fn garbage_collect(&mut self) {
|
||||
self.elements.retain(|id, _| !self.tombstones.contains(id));
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for OrSet<T>
where
    T: Clone + Eq + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
{
    /// Equivalent to [`OrSet::new`]: an empty set with no tombstones.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_orset_new() {
|
||||
let set: OrSet<String> = OrSet::new();
|
||||
assert!(set.is_empty());
|
||||
assert_eq!(set.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_add() {
|
||||
let node = uuid::Uuid::new_v4();
|
||||
let mut set: OrSet<String> = OrSet::new();
|
||||
|
||||
let (_, was_new) = set.add("foo".to_string(), node);
|
||||
assert!(was_new);
|
||||
assert!(set.contains(&"foo".to_string()));
|
||||
assert_eq!(set.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_add_duplicate() {
|
||||
let node = uuid::Uuid::new_v4();
|
||||
let mut set: OrSet<String> = OrSet::new();
|
||||
|
||||
let (id1, was_new1) = set.add("foo".to_string(), node);
|
||||
assert!(was_new1);
|
||||
|
||||
let (id2, was_new2) = set.add("foo".to_string(), node);
|
||||
assert!(!was_new2);
|
||||
assert_ne!(id1, id2); // Different operation IDs
|
||||
|
||||
assert_eq!(set.len(), 1); // Still one distinct value
|
||||
let ids = set.get_operation_ids(&"foo".to_string());
|
||||
assert_eq!(ids.len(), 2); // But two operation IDs
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_remove() {
|
||||
let node = uuid::Uuid::new_v4();
|
||||
let mut set: OrSet<String> = OrSet::new();
|
||||
|
||||
let (id, _) = set.add("foo".to_string(), node);
|
||||
assert!(set.contains(&"foo".to_string()));
|
||||
|
||||
set.remove(vec![id]);
|
||||
assert!(!set.contains(&"foo".to_string()));
|
||||
assert_eq!(set.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_add_remove_add() {
|
||||
let node = uuid::Uuid::new_v4();
|
||||
let mut set: OrSet<String> = OrSet::new();
|
||||
|
||||
// Add
|
||||
let (id1, _) = set.add("foo".to_string(), node);
|
||||
assert!(set.contains(&"foo".to_string()));
|
||||
|
||||
// Remove
|
||||
set.remove(vec![id1]);
|
||||
assert!(!set.contains(&"foo".to_string()));
|
||||
|
||||
// Add again (new operation ID)
|
||||
let (_id2, was_new) = set.add("foo".to_string(), node);
|
||||
assert!(was_new); // It's new because we removed it
|
||||
assert!(set.contains(&"foo".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_merge_simple() {
|
||||
let node1 = uuid::Uuid::new_v4();
|
||||
let node2 = uuid::Uuid::new_v4();
|
||||
|
||||
let mut set1: OrSet<String> = OrSet::new();
|
||||
set1.add("foo".to_string(), node1);
|
||||
|
||||
let mut set2: OrSet<String> = OrSet::new();
|
||||
set2.add("bar".to_string(), node2);
|
||||
|
||||
set1.merge(&set2);
|
||||
|
||||
assert_eq!(set1.len(), 2);
|
||||
assert!(set1.contains(&"foo".to_string()));
|
||||
assert!(set1.contains(&"bar".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_orset_merge_add_wins() {
|
||||
let node1 = uuid::Uuid::new_v4();
|
||||
let node2 = uuid::Uuid::new_v4();
|
||||
|
||||
let mut set1: OrSet<String> = OrSet::new();
|
||||
let (id, _) = set1.add("foo".to_string(), node1);
|
||||
set1.remove(vec![id]); // Remove it
|
||||
|
||||
let mut set2: OrSet<String> = OrSet::new();
|
||||
set2.add("foo".to_string(), node2); // Concurrently add (different ID)
|
||||
|
||||
set1.merge(&set2);
|
||||
|
||||
// Add should win
|
||||
assert!(set1.contains(&"foo".to_string()));
|
||||
}
|
||||
|
||||
#[test]
fn test_orset_merge_observed_remove() {
    let node = uuid::Uuid::new_v4();

    let mut source: OrSet<String> = OrSet::new();
    let (tag, _) = source.add("foo".to_string(), node);

    // The clone has observed the add, so its remove covers that tag.
    let mut observer = source.clone();
    observer.remove(vec![tag]);

    source.merge(&observer);

    // A remove that observed the add wins over it on merge.
    assert!(!source.contains(&"foo".to_string()));
}
|
||||
|
||||
#[test]
fn test_orset_values() {
    let node = uuid::Uuid::new_v4();
    let mut set: OrSet<String> = OrSet::new();

    for word in ["foo", "bar", "baz"] {
        set.add(word.to_string(), node);
    }

    // values() yields every live element exactly once.
    let collected: HashSet<_> = set.values().cloned().collect();
    assert_eq!(collected.len(), 3);
    for word in ["foo", "bar", "baz"] {
        assert!(collected.contains(word));
    }
}
|
||||
|
||||
#[test]
fn test_orset_garbage_collect() {
    let node = uuid::Uuid::new_v4();
    let mut set: OrSet<String> = OrSet::new();

    let (foo_tag, _) = set.add("foo".to_string(), node);
    let (_bar_tag, _) = set.add("bar".to_string(), node);
    set.remove(vec![foo_tag]);

    // Before GC, the tombstoned entry is still stored alongside the live one.
    assert_eq!(set.elements.len(), 2);
    assert_eq!(set.tombstones.len(), 1);

    set.garbage_collect();

    // GC drops the tombstoned element but keeps the tombstone record itself.
    assert_eq!(set.elements.len(), 1);
    assert_eq!(set.tombstones.len(), 1);
    assert!(set.contains(&"bar".to_string()));
    assert!(!set.contains(&"foo".to_string()));
}
|
||||
|
||||
#[test]
fn test_orset_clear() {
    let node = uuid::Uuid::new_v4();
    let mut set: OrSet<String> = OrSet::new();

    for word in ["foo", "bar"] {
        set.add(word.to_string(), node);
    }
    assert_eq!(set.len(), 2);

    // clear() empties the set entirely.
    set.clear();
    assert!(set.is_empty());
    assert_eq!(set.len(), 0);
}
|
||||
|
||||
#[test]
fn test_orset_serialization() -> bincode::Result<()> {
    let node = uuid::Uuid::new_v4();
    let mut original: OrSet<String> = OrSet::new();
    for word in ["foo", "bar"] {
        original.add(word.to_string(), node);
    }

    // Round-trip through bincode; nothing may be lost.
    let encoded = bincode::serialize(&original)?;
    let decoded: OrSet<String> = bincode::deserialize(&encoded)?;

    assert_eq!(decoded.len(), 2);
    assert!(decoded.contains(&"foo".to_string()));
    assert!(decoded.contains(&"bar".to_string()));

    Ok(())
}
|
||||
}
|
||||
395
crates/libmarathon/src/networking/plugin.rs
Normal file
395
crates/libmarathon/src/networking/plugin.rs
Normal file
@@ -0,0 +1,395 @@
|
||||
//! Bevy plugin for CRDT networking
|
||||
//!
|
||||
//! This module provides a complete Bevy plugin that integrates all networking
|
||||
//! components: delta generation, operation log, anti-entropy, join protocol,
|
||||
//! tombstones, and CRDT types.
|
||||
//!
|
||||
//! # Quick Start
|
||||
//!
|
||||
//! ```no_run
|
||||
//! use bevy::prelude::*;
|
||||
//! use libmarathon::networking::{
|
||||
//! NetworkingConfig,
|
||||
//! NetworkingPlugin,
|
||||
//! };
|
||||
//! use uuid::Uuid;
|
||||
//!
|
||||
//! fn main() {
|
||||
//! App::new()
|
||||
//! .add_plugins(DefaultPlugins)
|
||||
//! .add_plugins(NetworkingPlugin::new(NetworkingConfig {
|
||||
//! node_id: Uuid::new_v4(),
|
||||
//! sync_interval_secs: 10.0,
|
||||
//! prune_interval_secs: 60.0,
|
||||
//! tombstone_gc_interval_secs: 300.0,
|
||||
//! }))
|
||||
//! .run();
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
change_detection::{
|
||||
LastSyncVersions,
|
||||
auto_detect_transform_changes_system,
|
||||
},
|
||||
delta_generation::{
|
||||
NodeVectorClock,
|
||||
generate_delta_system,
|
||||
},
|
||||
entity_map::{
|
||||
NetworkEntityMap,
|
||||
cleanup_despawned_entities_system,
|
||||
register_networked_entities_system,
|
||||
},
|
||||
locks::{
|
||||
EntityLockRegistry,
|
||||
broadcast_lock_heartbeats_system,
|
||||
cleanup_expired_locks_system,
|
||||
release_locks_on_deselection_system,
|
||||
},
|
||||
message_dispatcher::message_dispatcher_system,
|
||||
operation_log::{
|
||||
OperationLog,
|
||||
periodic_sync_system,
|
||||
prune_operation_log_system,
|
||||
},
|
||||
session_lifecycle::{
|
||||
initialize_session_system,
|
||||
save_session_on_shutdown_system,
|
||||
},
|
||||
tombstones::{
|
||||
TombstoneRegistry,
|
||||
garbage_collect_tombstones_system,
|
||||
handle_local_deletions_system,
|
||||
},
|
||||
vector_clock::NodeId,
|
||||
};
|
||||
|
||||
/// Configuration for the networking plugin
///
/// NOTE(review): in `Plugin::build` these interval fields only appear in the
/// startup log line; the timer-driven systems registered there use hardcoded
/// 1s/5s durations. Confirm the intervals are actually threaded into the
/// periodic systems (`periodic_sync_system`, `prune_operation_log_system`,
/// `garbage_collect_tombstones_system`) elsewhere.
#[derive(Debug, Clone)]
pub struct NetworkingConfig {
    /// Unique ID for this node
    pub node_id: NodeId,

    /// How often to send SyncRequest for anti-entropy (in seconds)
    /// Default: 10.0 seconds
    pub sync_interval_secs: f32,

    /// How often to prune old operations from the log (in seconds)
    /// Default: 60.0 seconds (1 minute)
    pub prune_interval_secs: f32,

    /// How often to garbage collect tombstones (in seconds)
    /// Default: 300.0 seconds (5 minutes)
    pub tombstone_gc_interval_secs: f32,
}

impl Default for NetworkingConfig {
    /// Default config: random node ID, 10s sync, 60s prune, 300s tombstone GC.
    fn default() -> Self {
        Self {
            node_id: uuid::Uuid::new_v4(),
            sync_interval_secs: 10.0,
            prune_interval_secs: 60.0,
            tombstone_gc_interval_secs: 300.0,
        }
    }
}
|
||||
|
||||
/// Optional session secret for authentication
|
||||
///
|
||||
/// This is a pre-shared secret that controls access to the gossip network.
|
||||
/// If configured, all joining nodes must provide the correct session secret
|
||||
/// to receive the full state.
|
||||
///
|
||||
/// # Security Model
|
||||
///
|
||||
/// The session secret provides network-level access control by:
|
||||
/// - Preventing unauthorized nodes from joining the gossip
|
||||
/// - Hash-based comparison prevents timing attacks
|
||||
/// - Works alongside iroh-gossip's built-in QUIC transport encryption
|
||||
///
|
||||
/// # Usage
|
||||
///
|
||||
/// Insert this as a Bevy resource to enable session secret validation:
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkingPlugin,
|
||||
/// SessionSecret,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// App::new()
|
||||
/// .add_plugins(NetworkingPlugin::default_with_node_id(Uuid::new_v4()))
|
||||
/// .insert_resource(SessionSecret::new(b"my_secret_key"))
|
||||
/// .run();
|
||||
/// ```
|
||||
#[derive(Resource, Clone)]
|
||||
pub struct SessionSecret(Vec<u8>);
|
||||
|
||||
impl SessionSecret {
|
||||
/// Create a new session secret from bytes
|
||||
pub fn new(secret: impl Into<Vec<u8>>) -> Self {
|
||||
Self(secret.into())
|
||||
}
|
||||
|
||||
/// Get the secret as a byte slice
|
||||
pub fn as_bytes(&self) -> &[u8] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Bevy plugin for CRDT networking
|
||||
///
|
||||
/// This plugin sets up all systems and resources needed for distributed
|
||||
/// synchronization using CRDTs.
|
||||
///
|
||||
/// # Systems Added
|
||||
///
|
||||
/// ## Startup
|
||||
/// - Initialize or restore session from persistence (auto-rejoin)
|
||||
///
|
||||
/// ## PreUpdate
|
||||
/// - Register newly spawned networked entities
|
||||
/// - **Central message dispatcher** (handles all incoming messages efficiently)
|
||||
/// - EntityDelta messages
|
||||
/// - JoinRequest messages
|
||||
/// - FullState messages
|
||||
/// - SyncRequest messages
|
||||
/// - MissingDeltas messages
|
||||
/// - Lock messages (LockRequest, LockAcquired, LockRejected, LockHeartbeat, LockRelease, LockReleased)
|
||||
///
|
||||
/// ## Update
|
||||
/// - Auto-detect Transform changes
|
||||
/// - Handle local entity deletions
|
||||
/// - Release locks when entities are deselected
|
||||
///
|
||||
/// ## PostUpdate
|
||||
/// - Generate and broadcast EntityDelta for changed entities
|
||||
/// - Periodic SyncRequest for anti-entropy
|
||||
/// - Broadcast lock heartbeats to maintain active locks
|
||||
/// - Prune old operations from operation log
|
||||
/// - Garbage collect tombstones
|
||||
/// - Cleanup expired locks (5-second timeout)
|
||||
/// - Cleanup despawned entities from entity map
|
||||
///
|
||||
/// ## Last
|
||||
/// - Save session state and vector clock to persistence
|
||||
///
|
||||
/// # Resources Added
|
||||
///
|
||||
/// - `NodeVectorClock` - This node's vector clock
|
||||
/// - `NetworkEntityMap` - Bidirectional entity ID mapping
|
||||
/// - `LastSyncVersions` - Change detection for entities
|
||||
/// - `OperationLog` - Operation log for anti-entropy
|
||||
/// - `TombstoneRegistry` - Tombstone tracking for deletions
|
||||
/// - `EntityLockRegistry` - Entity lock registry with heartbeat tracking
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::{
|
||||
/// NetworkingConfig,
|
||||
/// NetworkingPlugin,
|
||||
/// };
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// App::new()
|
||||
/// .add_plugins(DefaultPlugins)
|
||||
/// .add_plugins(NetworkingPlugin::new(NetworkingConfig {
|
||||
/// node_id: Uuid::new_v4(),
|
||||
/// ..Default::default()
|
||||
/// }))
|
||||
/// .run();
|
||||
/// ```
|
||||
pub struct NetworkingPlugin {
|
||||
config: NetworkingConfig,
|
||||
}
|
||||
|
||||
impl NetworkingPlugin {
|
||||
/// Create a new networking plugin with custom configuration
|
||||
pub fn new(config: NetworkingConfig) -> Self {
|
||||
Self { config }
|
||||
}
|
||||
|
||||
/// Create a new networking plugin with default configuration
|
||||
pub fn default_with_node_id(node_id: NodeId) -> Self {
|
||||
Self {
|
||||
config: NetworkingConfig {
|
||||
node_id,
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Plugin for NetworkingPlugin {
    /// Register all CRDT-networking resources and systems on the App.
    ///
    /// NOTE(review): `self.config.sync_interval_secs` / `prune_interval_secs` /
    /// `tombstone_gc_interval_secs` are only used in the log output below; the
    /// two run-condition timers in this function are hardcoded to 1s and 5s.
    /// Confirm the configured intervals are consumed inside the periodic
    /// systems themselves.
    fn build(&self, app: &mut App) {
        // Add resources
        app.insert_resource(NodeVectorClock::new(self.config.node_id))
            .insert_resource(NetworkEntityMap::new())
            .insert_resource(LastSyncVersions::default())
            .insert_resource(OperationLog::new())
            .insert_resource(TombstoneRegistry::new())
            .insert_resource(EntityLockRegistry::new())
            .insert_resource(crate::networking::ComponentVectorClocks::new());

        // Startup systems - initialize session from persistence
        app.add_systems(Startup, initialize_session_system);

        // PreUpdate systems - handle incoming messages first
        app.add_systems(
            PreUpdate,
            (
                // Register new networked entities
                register_networked_entities_system,
                // Central message dispatcher - handles all incoming messages
                // This replaces the individual message handling systems and
                // eliminates O(n²) behavior from multiple systems polling the same queue
                message_dispatcher_system,
            )
                .chain(),
        );

        // Update systems - handle local operations
        app.add_systems(
            Update,
            (
                // Track Transform changes and mark NetworkedTransform as changed
                auto_detect_transform_changes_system,
                // Handle local entity deletions
                handle_local_deletions_system,
                // Release locks when entities are deselected
                release_locks_on_deselection_system,
            ),
        );

        // PostUpdate systems - generate and send deltas
        app.add_systems(
            PostUpdate,
            (
                // Generate deltas for changed entities
                generate_delta_system,
                // Periodic anti-entropy sync
                periodic_sync_system,
                // Maintenance tasks
                prune_operation_log_system,
                garbage_collect_tombstones_system,
                cleanup_expired_locks_system,
                // Cleanup despawned entities
                cleanup_despawned_entities_system,
            ),
        );

        // Broadcast lock heartbeats every 1 second to maintain active locks
        app.add_systems(
            PostUpdate,
            broadcast_lock_heartbeats_system.run_if(bevy::time::common_conditions::on_timer(
                std::time::Duration::from_secs(1),
            )),
        );

        // Auto-save session state every 5 seconds
        app.add_systems(
            Last,
            save_session_on_shutdown_system.run_if(bevy::time::common_conditions::on_timer(
                std::time::Duration::from_secs(5),
            )),
        );

        info!(
            "NetworkingPlugin initialized for node {}",
            self.config.node_id
        );
        info!(
            "Sync interval: {}s, Prune interval: {}s, GC interval: {}s",
            self.config.sync_interval_secs,
            self.config.prune_interval_secs,
            self.config.tombstone_gc_interval_secs
        );
    }
}
|
||||
|
||||
/// Extension trait for App to add networking more ergonomically
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::networking::NetworkingAppExt;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// App::new()
|
||||
/// .add_plugins(DefaultPlugins)
|
||||
/// .add_networking(Uuid::new_v4())
|
||||
/// .run();
|
||||
/// ```
|
||||
pub trait NetworkingAppExt {
    /// Add networking with default configuration and specified node ID
    fn add_networking(&mut self, node_id: NodeId) -> &mut Self;

    /// Add networking with custom configuration
    fn add_networking_with_config(&mut self, config: NetworkingConfig) -> &mut Self;
}
|
||||
|
||||
impl NetworkingAppExt for App {
|
||||
fn add_networking(&mut self, node_id: NodeId) -> &mut Self {
|
||||
self.add_plugins(NetworkingPlugin::default_with_node_id(node_id))
|
||||
}
|
||||
|
||||
fn add_networking_with_config(&mut self, config: NetworkingConfig) -> &mut Self {
|
||||
self.add_plugins(NetworkingPlugin::new(config))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_networking_config_default() {
        let config = NetworkingConfig::default();
        // Defaults documented on NetworkingConfig.
        assert_eq!(config.sync_interval_secs, 10.0);
        assert_eq!(config.prune_interval_secs, 60.0);
        assert_eq!(config.tombstone_gc_interval_secs, 300.0);
    }

    #[test]
    fn test_networking_plugin_creation() {
        let id = uuid::Uuid::new_v4();
        let plugin = NetworkingPlugin::default_with_node_id(id);
        assert_eq!(plugin.config.node_id, id);
    }

    #[test]
    fn test_networking_plugin_build() {
        let id = uuid::Uuid::new_v4();
        let mut app = App::new();
        app.add_plugins(NetworkingPlugin::default_with_node_id(id));

        // Every resource the plugin promises must exist after build().
        assert!(app.world().get_resource::<NodeVectorClock>().is_some());
        assert!(app.world().get_resource::<NetworkEntityMap>().is_some());
        assert!(app.world().get_resource::<LastSyncVersions>().is_some());
        assert!(app.world().get_resource::<OperationLog>().is_some());
        assert!(app.world().get_resource::<TombstoneRegistry>().is_some());
        assert!(app.world().get_resource::<EntityLockRegistry>().is_some());
    }

    #[test]
    fn test_app_extension_trait() {
        let id = uuid::Uuid::new_v4();
        let mut app = App::new();
        app.add_networking(id);

        // The extension trait must wire up the same resources as the plugin.
        assert!(app.world().get_resource::<NodeVectorClock>().is_some());
        assert!(app.world().get_resource::<NetworkEntityMap>().is_some());
    }
}
|
||||
631
crates/libmarathon/src/networking/rga.rs
Normal file
631
crates/libmarathon/src/networking/rga.rs
Normal file
@@ -0,0 +1,631 @@
|
||||
//! RGA (Replicated Growable Array) CRDT implementation
|
||||
//!
|
||||
//! This module provides a conflict-free replicated sequence that maintains
|
||||
//! consistent ordering across concurrent insert and delete operations.
|
||||
//!
|
||||
//! ## RGA Semantics
|
||||
//!
|
||||
//! - **Causal ordering**: Elements inserted after position P stay after P
|
||||
//! - **Concurrent inserts**: Resolved by timestamp + node ID tiebreaker
|
||||
//! - **Tombstones**: Deleted elements remain in structure to preserve positions
|
||||
//! - **Unique operation IDs**: Each insert gets a UUID for referencing
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! ```
|
||||
//! use libmarathon::networking::Rga;
|
||||
//! use uuid::Uuid;
|
||||
//!
|
||||
//! let node1 = Uuid::new_v4();
|
||||
//! let node2 = Uuid::new_v4();
|
||||
//!
|
||||
//! // Node 1 creates sequence: [A, B]
|
||||
//! let mut seq1: Rga<char> = Rga::new();
|
||||
//! let (id_a, _) = seq1.insert_at_beginning('A', node1);
|
||||
//! let (id_b, _) = seq1.insert_after(Some(id_a), 'B', node1);
|
||||
//!
|
||||
//! // Node 2 concurrently inserts C after A
|
||||
//! let mut seq2 = seq1.clone();
|
||||
//! seq2.insert_after(Some(id_a), 'C', node2);
|
||||
//!
|
||||
//! // Node 1 inserts D after A
|
||||
//! seq1.insert_after(Some(id_a), 'D', node1);
|
||||
//!
|
||||
//! // Merge - concurrent inserts after A are ordered by timestamp + node ID
|
||||
//! seq1.merge(&seq2);
|
||||
//!
|
||||
//! let values: Vec<char> = seq1.values().copied().collect();
|
||||
//! assert_eq!(values.len(), 4); // A, (C or D), (D or C), B
|
||||
//! ```
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bevy::prelude::*;
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
};
|
||||
|
||||
/// An element in an RGA sequence
///
/// Each element has a unique ID and tracks its logical position in the sequence
/// via the "after" pointer, forming a tree rooted at the virtual beginning.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RgaElement<T> {
    /// Unique ID for this element
    pub id: uuid::Uuid,

    /// The actual value
    pub value: T,

    /// ID of the element this was inserted after (None = beginning)
    pub after_id: Option<uuid::Uuid>,

    /// Node that performed the insert
    pub inserting_node: NodeId,

    /// Vector clock when inserted (for ordering concurrent inserts)
    pub vector_clock: VectorClock,

    /// Whether this element has been deleted (tombstone); tombstones stay in
    /// the structure so concurrent operations can still resolve positions
    pub is_deleted: bool,
}
|
||||
|
||||
/// RGA (Replicated Growable Array) CRDT
///
/// A replicated sequence supporting concurrent insert/delete with consistent
/// ordering based on causal relationships.
///
/// # Type Parameters
///
/// - `T`: The element type (must be Clone, Serialize, Deserialize)
///
/// # Internal Structure
///
/// Elements are stored in a HashMap by ID. Each element tracks which element
/// it was inserted after, forming a linked list structure. Deleted elements
/// remain as tombstones to preserve positions for concurrent operations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Rga<T> {
    /// Map from element ID to element (live entries and tombstones alike)
    elements: HashMap<uuid::Uuid, RgaElement<T>>,
}
|
||||
|
||||
impl<T> Rga<T>
|
||||
where
|
||||
T: Clone + Serialize + for<'de> Deserialize<'de>,
|
||||
{
|
||||
/// Create a new empty RGA sequence
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
elements: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert an element at the beginning of the sequence
|
||||
///
|
||||
/// Returns (element_id, position) where position is the index in the
|
||||
/// visible sequence.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::Rga;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut seq: Rga<char> = Rga::new();
|
||||
///
|
||||
/// let (id, pos) = seq.insert_at_beginning('A', node);
|
||||
/// assert_eq!(pos, 0);
|
||||
/// ```
|
||||
pub fn insert_at_beginning(&mut self, value: T, node_id: NodeId) -> (uuid::Uuid, usize) {
|
||||
let id = uuid::Uuid::new_v4();
|
||||
let mut clock = VectorClock::new();
|
||||
clock.increment(node_id);
|
||||
|
||||
let element = RgaElement {
|
||||
id,
|
||||
value,
|
||||
after_id: None,
|
||||
inserting_node: node_id,
|
||||
vector_clock: clock,
|
||||
is_deleted: false,
|
||||
};
|
||||
|
||||
self.elements.insert(id, element);
|
||||
|
||||
(id, 0)
|
||||
}
|
||||
|
||||
/// Insert an element after a specific element ID
|
||||
///
|
||||
/// If after_id is None, inserts at the beginning.
|
||||
///
|
||||
/// Returns (element_id, position) where position is the index in the
|
||||
/// visible sequence.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::Rga;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut seq: Rga<char> = Rga::new();
|
||||
///
|
||||
/// let (id_a, _) = seq.insert_at_beginning('A', node);
|
||||
/// let (id_b, pos) = seq.insert_after(Some(id_a), 'B', node);
|
||||
/// assert_eq!(pos, 1);
|
||||
///
|
||||
/// let values: Vec<char> = seq.values().copied().collect();
|
||||
/// assert_eq!(values, vec!['A', 'B']);
|
||||
/// ```
|
||||
pub fn insert_after(
|
||||
&mut self,
|
||||
after_id: Option<uuid::Uuid>,
|
||||
value: T,
|
||||
node_id: NodeId,
|
||||
) -> (uuid::Uuid, usize) {
|
||||
let id = uuid::Uuid::new_v4();
|
||||
let mut clock = VectorClock::new();
|
||||
clock.increment(node_id);
|
||||
|
||||
let element = RgaElement {
|
||||
id,
|
||||
value,
|
||||
after_id,
|
||||
inserting_node: node_id,
|
||||
vector_clock: clock,
|
||||
is_deleted: false,
|
||||
};
|
||||
|
||||
self.elements.insert(id, element);
|
||||
|
||||
// Calculate position
|
||||
let position = self.calculate_position(id);
|
||||
|
||||
(id, position)
|
||||
}
|
||||
|
||||
/// Insert an element with explicit vector clock
|
||||
///
|
||||
/// This is used when applying remote operations that already have
|
||||
/// a vector clock.
|
||||
pub fn insert_with_clock(
|
||||
&mut self,
|
||||
id: uuid::Uuid,
|
||||
after_id: Option<uuid::Uuid>,
|
||||
value: T,
|
||||
node_id: NodeId,
|
||||
vector_clock: VectorClock,
|
||||
) -> usize {
|
||||
let element = RgaElement {
|
||||
id,
|
||||
value,
|
||||
after_id,
|
||||
inserting_node: node_id,
|
||||
vector_clock,
|
||||
is_deleted: false,
|
||||
};
|
||||
|
||||
self.elements.insert(id, element);
|
||||
|
||||
self.calculate_position(id)
|
||||
}
|
||||
|
||||
/// Delete an element by ID
|
||||
///
|
||||
/// The element becomes a tombstone - it remains in the structure but
|
||||
/// is hidden from the visible sequence.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::Rga;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut seq: Rga<char> = Rga::new();
|
||||
///
|
||||
/// let (id, _) = seq.insert_at_beginning('A', node);
|
||||
/// assert_eq!(seq.len(), 1);
|
||||
///
|
||||
/// seq.delete(id);
|
||||
/// assert_eq!(seq.len(), 0);
|
||||
/// assert!(seq.is_deleted(id));
|
||||
/// ```
|
||||
pub fn delete(&mut self, element_id: uuid::Uuid) {
|
||||
if let Some(element) = self.elements.get_mut(&element_id) {
|
||||
element.is_deleted = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an element is deleted
|
||||
pub fn is_deleted(&self, element_id: uuid::Uuid) -> bool {
|
||||
self.elements
|
||||
.get(&element_id)
|
||||
.map(|e| e.is_deleted)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Get the visible length of the sequence (excluding tombstones)
|
||||
pub fn len(&self) -> usize {
|
||||
self.elements.values().filter(|e| !e.is_deleted).count()
|
||||
}
|
||||
|
||||
/// Check if the sequence is empty (no visible elements)
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
/// Get all visible values in order
|
||||
///
|
||||
/// Returns an iterator over the values in their proper sequence order.
|
||||
pub fn values(&self) -> impl Iterator<Item = &T> {
|
||||
let ordered = self.get_ordered_elements();
|
||||
ordered.into_iter().filter_map(move |id| {
|
||||
self.elements
|
||||
.get(&id)
|
||||
.and_then(|e| if !e.is_deleted { Some(&e.value) } else { None })
|
||||
})
|
||||
}
|
||||
|
||||
/// Get all visible elements with their IDs in order
|
||||
pub fn elements_with_ids(&self) -> Vec<(uuid::Uuid, &T)> {
|
||||
let ordered = self.get_ordered_elements();
|
||||
ordered
|
||||
.into_iter()
|
||||
.filter_map(|id| {
|
||||
self.elements.get(&id).and_then(|e| {
|
||||
if !e.is_deleted {
|
||||
Some((id, &e.value))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Merge another RGA into this one
|
||||
///
|
||||
/// Implements CRDT merge by combining all elements from both sequences
|
||||
/// and resolving positions based on causal ordering.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::Rga;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node1 = Uuid::new_v4();
|
||||
/// let node2 = Uuid::new_v4();
|
||||
///
|
||||
/// let mut seq1: Rga<char> = Rga::new();
|
||||
/// seq1.insert_at_beginning('A', node1);
|
||||
///
|
||||
/// let mut seq2: Rga<char> = Rga::new();
|
||||
/// seq2.insert_at_beginning('B', node2);
|
||||
///
|
||||
/// seq1.merge(&seq2);
|
||||
/// assert_eq!(seq1.len(), 2);
|
||||
/// ```
|
||||
pub fn merge(&mut self, other: &Rga<T>) {
|
||||
for (id, element) in &other.elements {
|
||||
// Insert or update element
|
||||
self.elements
|
||||
.entry(*id)
|
||||
.and_modify(|existing| {
|
||||
// If other's element is deleted, mark ours as deleted too
|
||||
if element.is_deleted {
|
||||
existing.is_deleted = true;
|
||||
}
|
||||
})
|
||||
.or_insert_with(|| element.clone());
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the sequence
|
||||
///
|
||||
/// Removes all elements and tombstones.
|
||||
pub fn clear(&mut self) {
|
||||
self.elements.clear();
|
||||
}
|
||||
|
||||
/// Garbage collect tombstones
|
||||
///
|
||||
/// Removes deleted elements that have no children (nothing inserted after
|
||||
/// them). This is safe because if no element references a tombstone as
|
||||
/// its parent, it can be removed without affecting the sequence.
|
||||
pub fn garbage_collect(&mut self) {
|
||||
// Find all IDs that are referenced as after_id
|
||||
let mut referenced_ids = std::collections::HashSet::new();
|
||||
for element in self.elements.values() {
|
||||
if let Some(after_id) = element.after_id {
|
||||
referenced_ids.insert(after_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove deleted elements that aren't referenced
|
||||
self.elements
|
||||
.retain(|id, element| !element.is_deleted || referenced_ids.contains(id));
|
||||
}
|
||||
|
||||
/// Get ordered list of element IDs
|
||||
///
|
||||
/// This builds the proper sequence order by following the after_id pointers
|
||||
/// and resolving concurrent inserts using vector clocks + node IDs.
|
||||
fn get_ordered_elements(&self) -> Vec<uuid::Uuid> {
|
||||
// Build a map of after_id -> list of elements inserted after it
|
||||
let mut children: HashMap<Option<uuid::Uuid>, Vec<uuid::Uuid>> = HashMap::new();
|
||||
|
||||
for (id, element) in &self.elements {
|
||||
children
|
||||
.entry(element.after_id)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(*id);
|
||||
}
|
||||
|
||||
// Sort children by vector clock, then node ID (for deterministic ordering)
|
||||
for child_list in children.values_mut() {
|
||||
child_list.sort_by(|a, b| {
|
||||
let elem_a = &self.elements[a];
|
||||
let elem_b = &self.elements[b];
|
||||
|
||||
// Compare vector clocks
|
||||
match elem_a.vector_clock.compare(&elem_b.vector_clock) {
|
||||
| Ok(std::cmp::Ordering::Less) => std::cmp::Ordering::Less,
|
||||
| Ok(std::cmp::Ordering::Greater) => std::cmp::Ordering::Greater,
|
||||
| Ok(std::cmp::Ordering::Equal) | Err(_) => {
|
||||
// If clocks are equal or concurrent, use node ID as tiebreaker
|
||||
elem_a.inserting_node.cmp(&elem_b.inserting_node)
|
||||
},
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Build ordered list by traversing from None (beginning)
|
||||
let mut result = Vec::new();
|
||||
let mut to_visit = vec![None];
|
||||
|
||||
while let Some(current_id) = to_visit.pop() {
|
||||
if let Some(child_ids) = children.get(¤t_id) {
|
||||
// Visit children in reverse order (since we're using a stack)
|
||||
for child_id in child_ids.iter().rev() {
|
||||
result.push(*child_id);
|
||||
to_visit.push(Some(*child_id));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Calculate the visible position of an element
|
||||
fn calculate_position(&self, element_id: uuid::Uuid) -> usize {
|
||||
let ordered = self.get_ordered_elements();
|
||||
ordered.iter().position(|id| id == &element_id).unwrap_or(0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for Rga<T>
|
||||
where
|
||||
T: Clone + Serialize + for<'de> Deserialize<'de>,
|
||||
{
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
fn test_rga_new() {
    let seq: Rga<char> = Rga::new();
    // A fresh sequence is empty by every measure.
    assert_eq!(seq.len(), 0);
    assert!(seq.is_empty());
}
|
||||
|
||||
#[test]
fn test_rga_insert_at_beginning() {
    let node = uuid::Uuid::new_v4();
    let mut seq: Rga<char> = Rga::new();

    let (_, position) = seq.insert_at_beginning('A', node);
    assert_eq!(position, 0);
    assert_eq!(seq.len(), 1);

    let contents: Vec<char> = seq.values().copied().collect();
    assert_eq!(contents, vec!['A']);
}
|
||||
|
||||
#[test]
fn test_rga_insert_after() {
    let node = uuid::Uuid::new_v4();
    let mut seq: Rga<char> = Rga::new();

    let (first_id, _) = seq.insert_at_beginning('A', node);
    let (_, second_pos) = seq.insert_after(Some(first_id), 'B', node);
    assert_eq!(second_pos, 1);

    let contents: Vec<char> = seq.values().copied().collect();
    assert_eq!(contents, vec!['A', 'B']);
}
|
||||
|
||||
#[test]
fn test_rga_delete() {
    let node = uuid::Uuid::new_v4();
    let mut seq: Rga<char> = Rga::new();

    let (head_id, _) = seq.insert_at_beginning('A', node);
    let (_tail_id, _) = seq.insert_after(Some(head_id), 'B', node);
    assert_eq!(seq.len(), 2);

    // Deletion tombstones the element: hidden from values, still tracked.
    seq.delete(head_id);
    assert_eq!(seq.len(), 1);
    assert!(seq.is_deleted(head_id));

    let contents: Vec<char> = seq.values().copied().collect();
    assert_eq!(contents, vec!['B']);
}
|
||||
|
||||
#[test]
fn test_rga_insert_delete_insert() {
    let node = uuid::Uuid::new_v4();
    let mut seq: Rga<char> = Rga::new();

    let (first_id, _) = seq.insert_at_beginning('A', node);
    seq.delete(first_id);
    assert_eq!(seq.len(), 0);

    // Inserting again after a delete produces a fresh, visible element.
    seq.insert_at_beginning('B', node);
    assert_eq!(seq.len(), 1);

    let contents: Vec<char> = seq.values().copied().collect();
    assert_eq!(contents, vec!['B']);
}
|
||||
|
||||
#[test]
fn test_rga_merge_simple() {
    let left_node = uuid::Uuid::new_v4();
    let right_node = uuid::Uuid::new_v4();

    let mut left: Rga<char> = Rga::new();
    let mut right: Rga<char> = Rga::new();
    left.insert_at_beginning('A', left_node);
    right.insert_at_beginning('B', right_node);

    // Merging disjoint replicas combines all their elements.
    left.merge(&right);
    assert_eq!(left.len(), 2);
}
|
||||
|
||||
#[test]
fn test_rga_merge_preserves_order() {
    let node = uuid::Uuid::new_v4();

    // Build the chain A -> B -> C.
    let mut original: Rga<char> = Rga::new();
    let (a, _) = original.insert_at_beginning('A', node);
    let (b, _) = original.insert_after(Some(a), 'B', node);
    original.insert_after(Some(b), 'C', node);

    // Merging with an identical clone must not disturb the ordering.
    let replica = original.clone();
    original.merge(&replica);

    let contents: Vec<char> = original.values().copied().collect();
    assert_eq!(contents, vec!['A', 'B', 'C']);
}
|
||||
|
||||
#[test]
fn test_rga_merge_deletion() {
    // A deletion performed on a replica must take effect after merging.
    let author = uuid::Uuid::new_v4();

    let mut local: Rga<char> = Rga::new();
    let (a, _) = local.insert_at_beginning('A', author);
    local.insert_after(Some(a), 'B', author);

    let mut replica = local.clone();
    replica.delete(a);

    local.merge(&replica);

    assert_eq!(local.values().copied().collect::<Vec<char>>(), vec!['B']);
}
|
||||
|
||||
#[test]
fn test_rga_concurrent_inserts() {
    // Two replicas concurrently insert after the same parent; after merging,
    // the parent stays first and both children survive.
    let node1 = uuid::Uuid::new_v4();
    let node2 = uuid::Uuid::new_v4();

    // Shared starting point: [A]
    let mut replica1: Rga<char> = Rga::new();
    let (anchor, _) = replica1.insert_at_beginning('A', node1);
    let mut replica2 = replica1.clone();

    // Concurrent, conflicting inserts after the same anchor.
    replica1.insert_after(Some(anchor), 'B', node1);
    replica2.insert_after(Some(anchor), 'C', node2);

    replica1.merge(&replica2);

    // A leads; B and C are both present in some deterministic order.
    assert_eq!(replica1.len(), 3);
    let merged: Vec<char> = replica1.values().copied().collect();
    assert_eq!(merged[0], 'A');
    assert!(merged.contains(&'B'));
    assert!(merged.contains(&'C'));
}
|
||||
|
||||
#[test]
fn test_rga_clear() {
    // clear() must leave the sequence empty.
    let author = uuid::Uuid::new_v4();
    let mut rga: Rga<char> = Rga::new();

    rga.insert_at_beginning('A', author);
    rga.insert_at_beginning('B', author);
    assert_eq!(rga.len(), 2);

    rga.clear();
    assert!(rga.is_empty());
}
|
||||
|
||||
#[test]
fn test_rga_garbage_collect() {
    // Elements that are deleted but still referenced as parents by live
    // children must survive garbage collection.
    let node = uuid::Uuid::new_v4();
    let mut seq: Rga<char> = Rga::new();

    let (id_a, _) = seq.insert_at_beginning('A', node);
    let (id_b, _) = seq.insert_after(Some(id_a), 'B', node);
    // The id of 'C' is never needed; the original's `let (_, _) =` binding
    // was a redundant wildcard pattern - just discard the return value.
    seq.insert_after(Some(id_b), 'C', node);

    // Delete A (has child B, so should be kept)
    seq.delete(id_a);

    // Delete B (has child C, so should be kept)
    seq.delete(id_b);

    assert_eq!(seq.elements.len(), 3);

    seq.garbage_collect();

    // A and B should still be there (referenced by children)
    // Only C is visible
    assert_eq!(seq.len(), 1);
    assert!(seq.elements.contains_key(&id_a));
    assert!(seq.elements.contains_key(&id_b));
}
|
||||
|
||||
#[test]
fn test_rga_serialization() -> bincode::Result<()> {
    // A sequence must survive a bincode round-trip with values and order
    // intact.
    let author = uuid::Uuid::new_v4();
    let mut rga: Rga<String> = Rga::new();

    let (head, _) = rga.insert_at_beginning("foo".to_string(), author);
    rga.insert_after(Some(head), "bar".to_string(), author);

    let encoded = bincode::serialize(&rga)?;
    let decoded: Rga<String> = bincode::deserialize(&encoded)?;

    assert_eq!(decoded.len(), 2);
    let restored: Vec<String> = decoded.values().cloned().collect();
    assert_eq!(restored, vec!["foo".to_string(), "bar".to_string()]);

    Ok(())
}
|
||||
}
|
||||
465
crates/libmarathon/src/networking/session.rs
Normal file
465
crates/libmarathon/src/networking/session.rs
Normal file
@@ -0,0 +1,465 @@
|
||||
use std::fmt;
|
||||
|
||||
// Session identification and lifecycle management
//
// This module provides session-scoped collaborative sessions with
// human-readable session codes, ALPN-based network isolation, and persistent
// session tracking.
//
// NOTE(review): these lines were written as `///!`, which is an outer doc
// comment whose text starts with `!` (it would attach to the next item, not
// the module). They are normalized to plain comments here; to make them real
// module docs (`//!`), move them above the first `use` item.
|
||||
use bevy::prelude::*;
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::networking::VectorClock;
|
||||
|
||||
/// Session identifier - UUID internally, human-readable code for display
///
/// Session IDs provide both technical uniqueness (UUID) and human usability
/// (abc-def-123 codes). All peers in a session share the same session ID.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SessionId {
    // Deterministically derived by hashing `code` (see `from_code`), so
    // equal codes always map to equal UUIDs.
    uuid: Uuid,
    // Human-readable code in `xxx-yyy-zzz` form; `from_code` normalizes it
    // to lowercase and `new` only generates lowercase characters.
    code: String,
}
|
||||
|
||||
impl SessionId {
|
||||
/// Create a new random session ID
|
||||
pub fn new() -> Self {
|
||||
// Generate a random 9-character code
|
||||
use rand::Rng;
|
||||
const CHARSET: &[u8] = b"abcdefghjkmnpqrstuvwxyz23456789";
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
let mut code = String::with_capacity(11);
|
||||
for i in 0..9 {
|
||||
let idx = rng.gen_range(0..CHARSET.len());
|
||||
code.push(CHARSET[idx] as char);
|
||||
if i == 2 || i == 5 {
|
||||
code.push('-');
|
||||
}
|
||||
}
|
||||
|
||||
// Hash the code to get a UUID
|
||||
let mut hasher = blake3::Hasher::new();
|
||||
hasher.update(b"/app/v1/session-code/");
|
||||
hasher.update(code.as_bytes());
|
||||
let hash = hasher.finalize();
|
||||
|
||||
let mut uuid_bytes = [0u8; 16];
|
||||
uuid_bytes.copy_from_slice(&hash.as_bytes()[..16]);
|
||||
let uuid = Uuid::from_bytes(uuid_bytes);
|
||||
|
||||
Self { uuid, code }
|
||||
}
|
||||
|
||||
/// Parse a session code (format: abc-def-123)
|
||||
///
|
||||
/// Hashes the code to derive a deterministic UUID.
|
||||
/// Returns error if code format is invalid.
|
||||
pub fn from_code(code: &str) -> Result<Self, SessionError> {
|
||||
// Validate format: xxx-yyy-zzz (11 chars total: 3 + dash + 3 + dash + 3)
|
||||
if code.len() != 11 {
|
||||
return Err(SessionError::InvalidCodeFormat);
|
||||
}
|
||||
|
||||
// Check dashes at positions 3 and 7
|
||||
let chars: Vec<char> = code.chars().collect();
|
||||
if chars.len() != 11 || chars[3] != '-' || chars[7] != '-' {
|
||||
return Err(SessionError::InvalidCodeFormat);
|
||||
}
|
||||
|
||||
// Validate all characters are in the charset
|
||||
const CHARSET: &str = "abcdefghjkmnpqrstuvwxyz23456789-";
|
||||
let code_lower = code.to_lowercase();
|
||||
if !code_lower.chars().all(|c| CHARSET.contains(c)) {
|
||||
return Err(SessionError::InvalidCodeFormat);
|
||||
}
|
||||
|
||||
// Hash the code to get a UUID (deterministic)
|
||||
let mut hasher = blake3::Hasher::new();
|
||||
hasher.update(b"/app/v1/session-code/");
|
||||
hasher.update(code_lower.as_bytes());
|
||||
let hash = hasher.finalize();
|
||||
|
||||
let mut uuid_bytes = [0u8; 16];
|
||||
uuid_bytes.copy_from_slice(&hash.as_bytes()[..16]);
|
||||
let uuid = Uuid::from_bytes(uuid_bytes);
|
||||
|
||||
Ok(Self {
|
||||
uuid,
|
||||
code: code_lower,
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert to human-readable code (abc-def-123 format)
|
||||
pub fn to_code(&self) -> &str {
|
||||
&self.code
|
||||
}
|
||||
|
||||
/// Derive ALPN identifier for network isolation
|
||||
///
|
||||
/// Computes deterministic 32-byte BLAKE3 hash from session UUID.
|
||||
/// All peers independently compute the same ALPN from session code.
|
||||
///
|
||||
/// # Security
|
||||
/// The domain separation prefix (`/app/v1/session-id/`) ensures ALPNs
|
||||
/// cannot collide with other protocol uses of the same hash space.
|
||||
pub fn to_alpn(&self) -> [u8; 32] {
|
||||
let mut hasher = blake3::Hasher::new();
|
||||
hasher.update(b"/app/v1/session-id/");
|
||||
hasher.update(self.uuid.as_bytes());
|
||||
|
||||
let hash = hasher.finalize();
|
||||
*hash.as_bytes()
|
||||
}
|
||||
|
||||
/// Get raw UUID
|
||||
pub fn as_uuid(&self) -> &Uuid {
|
||||
&self.uuid
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SessionId {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for SessionId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", &self.code)
|
||||
}
|
||||
}
|
||||
|
||||
/// Session lifecycle states
///
/// `Display` and `from_str` use the lowercase variant names ("created",
/// "joining", ...) and must remain exact inverses of each other.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SessionState {
    /// Session exists in database but hasn't connected to network yet
    Created,
    /// Currently attempting to join network and sync state
    Joining,
    /// Fully synchronized and actively collaborating
    Active,
    /// Temporarily offline, will attempt to rejoin when network restored
    Disconnected,
    /// User explicitly left the session (clean shutdown)
    Left,
}
|
||||
|
||||
impl fmt::Display for SessionState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
| SessionState::Created => write!(f, "created"),
|
||||
| SessionState::Joining => write!(f, "joining"),
|
||||
| SessionState::Active => write!(f, "active"),
|
||||
| SessionState::Disconnected => write!(f, "disconnected"),
|
||||
| SessionState::Left => write!(f, "left"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SessionState {
|
||||
/// Parse from string representation
|
||||
pub fn from_str(s: &str) -> Option<Self> {
|
||||
match s {
|
||||
| "created" => Some(SessionState::Created),
|
||||
| "joining" => Some(SessionState::Joining),
|
||||
| "active" => Some(SessionState::Active),
|
||||
| "disconnected" => Some(SessionState::Disconnected),
|
||||
| "left" => Some(SessionState::Left),
|
||||
| _ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Session metadata
///
/// Tracks session identity, creation time, entity count, and lifecycle state.
/// Persisted to database for crash recovery and auto-rejoin.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    /// Unique session identifier
    pub id: SessionId,

    /// Optional human-readable name
    pub name: Option<String>,

    /// When the session was created (Unix timestamp)
    pub created_at: i64,

    /// When this node was last active in the session (Unix timestamp)
    pub last_active: i64,

    /// Cached count of entities in this session
    ///
    /// May lag behind the live world; it is refreshed when the session is
    /// saved (see the session-save system in `session_lifecycle`).
    pub entity_count: usize,

    /// Current lifecycle state
    pub state: SessionState,

    /// Optional encrypted session secret for access control
    ///
    /// NOTE(review): the encryption scheme and producer/consumer of this
    /// value are not visible in this module - confirm before relying on it.
    pub secret: Option<Vec<u8>>,
}
|
||||
|
||||
impl Session {
|
||||
/// Create a new session with default values
|
||||
pub fn new(id: SessionId) -> Self {
|
||||
let now = chrono::Utc::now().timestamp();
|
||||
Self {
|
||||
id,
|
||||
name: None,
|
||||
created_at: now,
|
||||
last_active: now,
|
||||
entity_count: 0,
|
||||
state: SessionState::Created,
|
||||
secret: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the last active timestamp to current time
|
||||
pub fn touch(&mut self) {
|
||||
self.last_active = chrono::Utc::now().timestamp();
|
||||
}
|
||||
|
||||
/// Transition to a new state and update last active time
|
||||
pub fn transition_to(&mut self, new_state: SessionState) {
|
||||
tracing::info!(
|
||||
"Session {} transitioning: {:?} -> {:?}",
|
||||
self.id,
|
||||
self.state,
|
||||
new_state
|
||||
);
|
||||
self.state = new_state;
|
||||
self.touch();
|
||||
}
|
||||
}
|
||||
|
||||
/// Current session resource for Bevy ECS
///
/// Contains both session metadata and the vector clock snapshot from when
/// we joined (for hybrid sync protocol).
///
/// Inserted into the world by `initialize_session_system` at startup.
#[derive(Resource, Clone)]
pub struct CurrentSession {
    /// Session metadata
    pub session: Session,

    /// Vector clock when we last left/joined this session
    /// Used for hybrid sync to request only missing deltas
    pub last_known_clock: VectorClock,
}
|
||||
|
||||
impl CurrentSession {
|
||||
/// Create a new current session
|
||||
pub fn new(session: Session, last_known_clock: VectorClock) -> Self {
|
||||
Self {
|
||||
session,
|
||||
last_known_clock,
|
||||
}
|
||||
}
|
||||
|
||||
/// Transition the session to a new state
|
||||
pub fn transition_to(&mut self, new_state: SessionState) {
|
||||
self.session.transition_to(new_state);
|
||||
}
|
||||
}
|
||||
|
||||
/// Session-related errors
#[derive(Debug, thiserror::Error)]
pub enum SessionError {
    /// The supplied code is not `xxx-yyy-zzz` over the session alphabet.
    #[error("Invalid session code format (expected: abc-def-123)")]
    InvalidCodeFormat,

    /// No session with the requested ID exists in storage.
    #[error("Session not found")]
    NotFound,

    /// Underlying persistence failure, carried as a message string.
    #[error("Database error: {0}")]
    Database(String),
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_session_id_creation() {
        // Two freshly generated IDs must not collide.
        assert_ne!(SessionId::new(), SessionId::new());
    }

    #[test]
    fn test_session_code_roundtrip() {
        let id = SessionId::new();
        let code = id.to_code();

        // Code shape: xxx-yyy-zzz (11 chars, dashes at indices 3 and 7).
        assert_eq!(code.len(), 11);
        assert_eq!(&code[3..4], "-");
        assert_eq!(&code[7..8], "-");

        // Parsing the code back must reproduce the identical SessionId.
        let reparsed = SessionId::from_code(code).expect("Failed to parse code");
        assert_eq!(id, reparsed);
    }

    #[test]
    fn test_session_code_deterministic() {
        // Identical codes must hash to identical SessionIds.
        let code = "abc-def-234";
        assert_eq!(
            SessionId::from_code(code).unwrap(),
            SessionId::from_code(code).unwrap()
        );
    }

    #[test]
    fn test_session_code_case_insensitive() {
        // Upper- and lowercase spellings of a code name the same session.
        let lower = SessionId::from_code("abc-def-234").unwrap();
        let upper = SessionId::from_code("ABC-DEF-234").unwrap();
        assert_eq!(lower, upper);
    }

    #[test]
    fn test_session_code_invalid_format() {
        // Wrong lengths, missing dashes, and misplaced dashes are rejected.
        for bad in [
            "abc-def",      // too short
            "abc-def-1234", // too long
            "abcdef-123",   // missing first dash
            "abc-def123",   // missing second dash
            "ab-cdef-123",  // dashes in wrong positions
        ] {
            assert!(SessionId::from_code(bad).is_err());
        }
    }

    #[test]
    fn test_alpn_derivation_deterministic() {
        // The same session ID always derives the same ALPN.
        let id = SessionId::new();
        assert_eq!(id.to_alpn(), id.to_alpn());
    }

    #[test]
    fn test_alpn_derivation_unique() {
        // Different session IDs derive different ALPNs.
        assert_ne!(SessionId::new().to_alpn(), SessionId::new().to_alpn());
    }

    #[test]
    fn test_alpn_length() {
        // ALPN is always a 32-byte BLAKE3 digest.
        assert_eq!(SessionId::new().to_alpn().len(), 32);
    }

    #[test]
    fn test_session_state_display() {
        let cases = [
            (SessionState::Created, "created"),
            (SessionState::Joining, "joining"),
            (SessionState::Active, "active"),
            (SessionState::Disconnected, "disconnected"),
            (SessionState::Left, "left"),
        ];
        for (state, expected) in cases {
            assert_eq!(state.to_string(), expected);
        }
    }

    #[test]
    fn test_session_state_from_str() {
        let cases = [
            ("created", Some(SessionState::Created)),
            ("joining", Some(SessionState::Joining)),
            ("active", Some(SessionState::Active)),
            ("disconnected", Some(SessionState::Disconnected)),
            ("left", Some(SessionState::Left)),
            ("invalid", None),
        ];
        for (input, expected) in cases {
            assert_eq!(SessionState::from_str(input), expected);
        }
    }

    #[test]
    fn test_session_creation() {
        let id = SessionId::new();
        let session = Session::new(id.clone());

        assert_eq!(session.id, id);
        assert_eq!(session.name, None);
        assert_eq!(session.entity_count, 0);
        assert_eq!(session.state, SessionState::Created);
        assert_eq!(session.secret, None);
        assert!(session.created_at > 0);
        assert_eq!(session.created_at, session.last_active);
    }

    #[test]
    fn test_session_transition() {
        let mut session = Session::new(SessionId::new());
        let (state_before, time_before) = (session.state, session.last_active);

        session.transition_to(SessionState::Joining);

        assert_ne!(session.state, state_before);
        assert_eq!(session.state, SessionState::Joining);
        // Timestamp should be updated (greater or equal due to precision)
        assert!(session.last_active >= time_before);
    }

    #[test]
    fn test_session_display() {
        // Display renders the human-readable code.
        let id = SessionId::new();
        assert_eq!(id.to_code(), &format!("{}", id));
    }

    #[test]
    fn test_current_session_creation() {
        let session = Session::new(SessionId::new());
        let current = CurrentSession::new(session.clone(), VectorClock::new());

        assert_eq!(current.session.id, session.id);
        assert_eq!(current.session.state, SessionState::Created);
    }

    #[test]
    fn test_current_session_transition() {
        let session = Session::new(SessionId::new());
        let mut current = CurrentSession::new(session, VectorClock::new());

        current.transition_to(SessionState::Active);
        assert_eq!(current.session.state, SessionState::Active);
    }
}
|
||||
260
crates/libmarathon/src/networking/session_lifecycle.rs
Normal file
260
crates/libmarathon/src/networking/session_lifecycle.rs
Normal file
@@ -0,0 +1,260 @@
|
||||
//! Session lifecycle management - startup and shutdown
|
||||
//!
|
||||
//! This module handles automatic session restoration on startup and clean
|
||||
//! session persistence on shutdown. It enables seamless auto-rejoin after
|
||||
//! app restarts.
|
||||
//!
|
||||
//! # Lifecycle Flow
|
||||
//!
|
||||
//! **Startup:**
|
||||
//! 1. Check database for last active session
|
||||
//! 2. If found and state is Active/Disconnected → auto-rejoin
|
||||
//! 3. Load last known vector clock for hybrid sync
|
||||
//! 4. Insert CurrentSession resource
|
||||
//!
|
||||
//! **Shutdown:**
|
||||
//! 1. Update session metadata (state, last_active, entity_count)
|
||||
//! 2. Save session to database
|
||||
//! 3. Save current vector clock
|
||||
//! 4. Mark clean shutdown in database
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::{
|
||||
networking::{
|
||||
CurrentSession,
|
||||
Session,
|
||||
SessionId,
|
||||
SessionState,
|
||||
VectorClock,
|
||||
delta_generation::NodeVectorClock,
|
||||
},
|
||||
persistence::{
|
||||
PersistenceDb,
|
||||
get_last_active_session,
|
||||
load_session_vector_clock,
|
||||
save_session,
|
||||
save_session_vector_clock,
|
||||
},
|
||||
};
|
||||
|
||||
/// System to initialize or restore session on startup
///
/// This system runs once at startup and either:
/// - Restores the last active session (auto-rejoin)
/// - Creates a new session
///
/// Add to your app as a Startup system AFTER setup_persistence:
/// ```no_run
/// use bevy::prelude::*;
/// use libmarathon::networking::initialize_session_system;
///
/// App::new()
///     .add_systems(Startup, initialize_session_system);
/// ```
pub fn initialize_session_system(world: &mut World) {
    info!("Initializing session...");

    // Load session data in a scoped block to release the database lock
    let session_data: Option<(Session, VectorClock)> = {
        // Get database connection. If persistence is not set up we bail out
        // entirely - note this means NO CurrentSession resource is inserted.
        let db = match world.get_resource::<PersistenceDb>() {
            | Some(db) => db,
            | None => {
                error!("PersistenceDb resource not found - cannot initialize session");
                return;
            },
        };

        // Lock the database connection.
        // NOTE(review): lock() only errors when the mutex is poisoned
        // (another thread panicked while holding it); bailing out here is a
        // best-effort choice.
        let conn = match db.conn.lock() {
            | Ok(conn) => conn,
            | Err(e) => {
                error!("Failed to lock database connection: {}", e);
                return;
            },
        };

        // Try to load last active session
        match get_last_active_session(&conn) {
            | Ok(Some(mut session)) => {
                // Check if we should auto-rejoin
                match session.state {
                    | SessionState::Active | SessionState::Disconnected => {
                        info!(
                            "Found previous session {} in state {:?} - attempting auto-rejoin",
                            session.id, session.state
                        );

                        // Load last known vector clock. A missing/corrupt
                        // clock degrades to an empty clock (full resync)
                        // rather than aborting startup.
                        let last_known_clock = match load_session_vector_clock(&conn, session.id.clone()) {
                            | Ok(clock) => clock,
                            | Err(e) => {
                                warn!(
                                    "Failed to load vector clock for session {}: {} - using empty clock",
                                    session.id, e
                                );
                                VectorClock::new()
                            },
                        };

                        // Transition to Joining state
                        session.transition_to(SessionState::Joining);

                        Some((session, last_known_clock))
                    },

                    | _ => {
                        // For Created, Left, or Joining states, create new session
                        None
                    },
                }
            },

            | Ok(None) => None,
            | Err(e) => {
                // Treat a read failure like "no previous session": fall
                // through to creating a fresh one.
                error!("Failed to load last active session: {}", e);
                None
            },
        }
    }; // conn and db are dropped here, releasing the lock

    // Now insert the session resource (no longer holding database lock)
    let current_session = match session_data {
        | Some((session, last_known_clock)) => {
            info!("Session initialized for auto-rejoin");
            CurrentSession::new(session, last_known_clock)
        },
        | None => {
            info!("Creating new session");
            let session_id = SessionId::new();
            let session = Session::new(session_id);
            CurrentSession::new(session, VectorClock::new())
        },
    };

    world.insert_resource(current_session);
}
|
||||
|
||||
/// System to auto-save session state periodically
///
/// This system periodically saves session state to persist it for auto-rejoin
/// on next startup. Typically run every 5 seconds.
///
/// Add to your app using the Last schedule with a timer:
/// ```no_run
/// use bevy::prelude::*;
/// use bevy::time::common_conditions::on_timer;
/// use libmarathon::networking::save_session_on_shutdown_system;
/// use std::time::Duration;
///
/// App::new()
///     .add_systems(Last, save_session_on_shutdown_system
///         .run_if(on_timer(Duration::from_secs(5))));
/// ```
pub fn save_session_on_shutdown_system(world: &mut World) {
    debug!("Auto-saving session state...");

    // Get current session (cloned so we can mutate a local copy).
    let current_session = match world.get_resource::<CurrentSession>() {
        | Some(session) => session.clone(),
        | None => {
            warn!("No CurrentSession found - skipping session save");
            return;
        },
    };

    let mut session = current_session.session.clone();

    // Update session metadata.
    // NOTE(review): touch() here is redundant - transition_to() below calls
    // touch() itself.
    session.touch();
    // NOTE(review): this marks the session Left on EVERY invocation, yet the
    // doc above recommends running this periodically on a timer, and
    // initialize_session_system only auto-rejoins Active/Disconnected
    // sessions. Periodic use therefore disables auto-rejoin - confirm whether
    // Left should only be set on a real shutdown path.
    session.transition_to(SessionState::Left);

    // Count entities in the world (refreshes the cached entity_count).
    let entity_count = world
        .query::<&crate::networking::NetworkedEntity>()
        .iter(world)
        .count();
    session.entity_count = entity_count;

    // Get current vector clock (None if delta generation is not running).
    let vector_clock = world
        .get_resource::<NodeVectorClock>()
        .map(|nc| nc.clock.clone());

    // Save to database in a scoped block
    {
        // Get database connection
        let db = match world.get_resource::<PersistenceDb>() {
            | Some(db) => db,
            | None => {
                error!("PersistenceDb resource not found - cannot save session");
                return;
            },
        };

        // Lock the database connection
        let mut conn = match db.conn.lock() {
            | Ok(conn) => conn,
            | Err(e) => {
                error!("Failed to lock database connection: {}", e);
                return;
            },
        };

        // Save session to database; if this fails, skip the clock save too.
        match save_session(&mut conn, &session) {
            | Ok(()) => {
                info!("Session {} saved successfully", session.id);
            },
            | Err(e) => {
                error!("Failed to save session {}: {}", session.id, e);
                return;
            },
        }

        // Save current vector clock (best effort - a failure here is logged
        // but does not undo the session save).
        if let Some(ref clock) = vector_clock {
            match save_session_vector_clock(&mut conn, session.id.clone(), clock) {
                | Ok(()) => {
                    info!("Vector clock saved for session {}", session.id);
                },
                | Err(e) => {
                    error!("Failed to save vector clock for session {}: {}", session.id, e);
                },
            }
        }
    } // conn and db are dropped here

    info!("Session state saved successfully");
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_initialize_session_creates_new() {
        // Without a PersistenceDb the system must bail out gracefully
        // instead of panicking, and insert no CurrentSession resource.
        let mut app = App::new();
        initialize_session_system(app.world_mut());
        assert!(app.world().get_resource::<CurrentSession>().is_none());
    }

    #[test]
    fn test_session_roundtrip() {
        let mut session = Session::new(SessionId::new());
        session.entity_count = 5;
        session.transition_to(SessionState::Active);

        // transition_to touches last_active, so it never precedes created_at
        // (they may be equal at sub-second precision).
        assert!(session.last_active >= session.created_at);
        assert_eq!(session.state, SessionState::Active);
        assert_eq!(session.entity_count, 5);
    }
}
|
||||
167
crates/libmarathon/src/networking/sync_component.rs
Normal file
167
crates/libmarathon/src/networking/sync_component.rs
Normal file
@@ -0,0 +1,167 @@
|
||||
//! Sync Component trait and supporting types for RFC 0003
|
||||
//!
|
||||
//! This module defines the core trait that all synced components implement,
|
||||
//! along with the types used for strategy selection and merge decisions.
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
/// Sync strategy enum - determines how conflicts are resolved
///
/// Selected per component type at compile time via
/// [`SyncComponent::STRATEGY`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SyncStrategy {
    /// Last-Write-Wins: Newer timestamp wins, node ID tiebreaker for concurrent
    LastWriteWins,
    /// OR-Set: Observed-Remove Set for collections
    Set,
    /// Sequence: RGA (Replicated Growable Array) for ordered lists
    Sequence,
    /// Custom: User-defined conflict resolution
    Custom,
}
|
||||
|
||||
/// Result of comparing vector clocks
///
/// Passed into [`SyncComponent::merge`] so each strategy can decide whether
/// to keep local state, take remote state, or merge both.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClockComparison {
    /// Remote vector clock is strictly newer
    RemoteNewer,
    /// Local vector clock is strictly newer
    LocalNewer,
    /// Concurrent (neither is newer)
    Concurrent,
}
|
||||
|
||||
/// Decision made during component merge operation
///
/// Returned by [`SyncComponent::merge`] to report how the conflict was
/// resolved.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ComponentMergeDecision {
    /// Kept local value
    KeptLocal,
    /// Took remote value
    TookRemote,
    /// Merged both (for CRDTs)
    Merged,
}
|
||||
|
||||
/// Core trait for synced components
///
/// This trait is automatically implemented by the `#[derive(Synced)]` macro.
/// All synced components must implement this trait.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use libmarathon::networking::{
///     ClockComparison,
///     ComponentMergeDecision,
///     SyncComponent,
///     SyncStrategy,
/// };
///
/// // Example showing what the trait looks like - normally generated by #[derive(Synced)]
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
/// struct Health(f32);
///
/// // The SyncComponent trait defines these methods that the macro generates
/// // You can serialize and deserialize components for sync
/// ```
pub trait SyncComponent: Component + Reflect + Sized {
    /// Schema version for this component
    ///
    /// NOTE(review): no version-mismatch/migration handling is visible in
    /// this module - confirm how differing VERSIONs are reconciled.
    const VERSION: u32;

    /// Sync strategy for conflict resolution
    const STRATEGY: SyncStrategy;

    /// Serialize this component to bytes
    ///
    /// Uses bincode for efficient binary serialization.
    fn serialize_sync(&self) -> anyhow::Result<Vec<u8>>;

    /// Deserialize this component from bytes
    ///
    /// Uses bincode to deserialize from the format created by `serialize_sync`.
    fn deserialize_sync(data: &[u8]) -> anyhow::Result<Self>;

    /// Merge remote state with local state
    ///
    /// The merge logic is strategy-specific:
    /// - **LWW**: Takes newer value based on vector clock, uses tiebreaker for
    ///   concurrent
    /// - **Set**: Merges both sets (OR-Set semantics)
    /// - **Sequence**: Merges sequences preserving order (RGA semantics)
    /// - **Custom**: Calls user-defined ConflictResolver
    ///
    /// # Arguments
    /// * `remote` - The remote state to merge
    /// * `clock_cmp` - Result of comparing local and remote vector clocks
    ///
    /// # Returns
    /// Decision about what happened during the merge
    fn merge(&mut self, remote: Self, clock_cmp: ClockComparison) -> ComponentMergeDecision;
}
|
||||
|
||||
/// Marker component for entities that should be synced
///
/// Add this to any entity with synced components to enable automatic
/// change detection and synchronization. It is a zero-sized unit struct,
/// so attaching it carries no per-entity storage cost.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use libmarathon::networking::Synced;
/// use sync_macros::Synced as SyncedDerive;
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Health(f32);
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Position {
///     x: f32,
///     y: f32,
/// }
///
/// let mut world = World::new();
/// world.spawn((
///     Health(100.0),
///     Position { x: 0.0, y: 0.0 },
///     Synced, // Marker enables sync
/// ));
/// ```
#[derive(Component, Reflect, Default, Clone, Copy)]
#[reflect(Component)]
pub struct Synced;
|
||||
|
||||
/// Diagnostic component for debugging sync issues
///
/// Add this to an entity to get detailed diagnostic output about
/// its sync status. Like [`Synced`], it is a zero-sized marker; the actual
/// reporting is done by a separate diagnostic system that queries for it.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use libmarathon::networking::DiagnoseSync;
///
/// let mut world = World::new();
/// let entity = world.spawn_empty().id();
/// world.entity_mut(entity).insert(DiagnoseSync);
/// // A diagnostic system will check this entity and log sync status
/// ```
#[derive(Component, Reflect, Default)]
#[reflect(Component)]
pub struct DiagnoseSync;
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: SyncStrategy derives a usable PartialEq.
    #[test]
    fn strategy_enum_works() {
        assert_eq!(SyncStrategy::LastWriteWins, SyncStrategy::LastWriteWins);
        assert_ne!(SyncStrategy::LastWriteWins, SyncStrategy::Set);
    }

    // Smoke test: ClockComparison derives a usable PartialEq.
    #[test]
    fn clock_comparison_works() {
        assert_eq!(ClockComparison::RemoteNewer, ClockComparison::RemoteNewer);
        assert_ne!(ClockComparison::RemoteNewer, ClockComparison::LocalNewer);
    }
}
|
||||
431
crates/libmarathon/src/networking/tombstones.rs
Normal file
431
crates/libmarathon/src/networking/tombstones.rs
Normal file
@@ -0,0 +1,431 @@
|
||||
//! Entity tombstone tracking for deletion semantics
|
||||
//!
|
||||
//! This module manages tombstones for deleted entities, preventing resurrection
|
||||
//! and supporting eventual garbage collection.
|
||||
//!
|
||||
//! ## Deletion Semantics
|
||||
//!
|
||||
//! When an entity is deleted:
|
||||
//! 1. A Delete operation is generated with current vector clock
|
||||
//! 2. The entity is marked as deleted (tombstone) in TombstoneRegistry
|
||||
//! 3. The tombstone is propagated to all peers
|
||||
//! 4. Operations older than the deletion are ignored
|
||||
//! 5. After a grace period, tombstones can be garbage collected
|
||||
//!
|
||||
//! ## Resurrection Prevention
|
||||
//!
|
||||
//! If a peer creates an entity (Set operation) while another peer deletes it:
|
||||
//! - Use vector clock comparison: if delete happened-after create, deletion
|
||||
//! wins
|
||||
//! - If concurrent, deletion wins (delete bias for safety)
|
||||
//! - This prevents "zombie" entities from reappearing
|
||||
//!
|
||||
//! ## Garbage Collection
|
||||
//!
|
||||
//! Tombstones are kept for a configurable period (default: 1 hour) to handle
|
||||
//! late-arriving operations. After this period, they can be safely removed.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::networking::{
|
||||
GossipBridge,
|
||||
NodeVectorClock,
|
||||
vector_clock::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
};
|
||||
|
||||
/// How long to keep tombstones before garbage collection (in seconds)
const TOMBSTONE_TTL_SECS: u64 = 3600; // 1 hour

/// A tombstone record for a deleted entity
///
/// Kept in [`TombstoneRegistry`] so late-arriving operations on a deleted
/// entity can be recognized and rejected (resurrection prevention).
#[derive(Debug, Clone)]
pub struct Tombstone {
    /// The entity that was deleted
    pub entity_id: uuid::Uuid,

    /// Node that initiated the deletion
    pub deleting_node: NodeId,

    /// Vector clock when deletion occurred; compared against incoming
    /// operation clocks to decide whether they pre- or post-date the delete
    pub deletion_clock: VectorClock,

    /// When this tombstone was created locally (for garbage collection);
    /// an Instant, so it is not serializable or comparable across processes
    pub timestamp: std::time::Instant,
}
|
||||
|
||||
/// Resource tracking tombstones for deleted entities
///
/// This prevents deleted entities from being resurrected by late-arriving
/// create operations. Entries expire after `TOMBSTONE_TTL_SECS` via
/// `garbage_collect`.
#[derive(Resource, Default)]
pub struct TombstoneRegistry {
    /// Map from entity ID to tombstone
    tombstones: HashMap<uuid::Uuid, Tombstone>,
}
|
||||
|
||||
impl TombstoneRegistry {
|
||||
/// Create a new tombstone registry
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
tombstones: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an entity is deleted
|
||||
pub fn is_deleted(&self, entity_id: uuid::Uuid) -> bool {
|
||||
self.tombstones.contains_key(&entity_id)
|
||||
}
|
||||
|
||||
/// Get the tombstone for an entity, if it exists
|
||||
pub fn get_tombstone(&self, entity_id: uuid::Uuid) -> Option<&Tombstone> {
|
||||
self.tombstones.get(&entity_id)
|
||||
}
|
||||
|
||||
/// Record a deletion
|
||||
///
|
||||
/// This creates a tombstone for the entity. If a tombstone already exists
|
||||
/// and the new deletion has a later clock, it replaces the old one.
|
||||
pub fn record_deletion(
|
||||
&mut self,
|
||||
entity_id: uuid::Uuid,
|
||||
deleting_node: NodeId,
|
||||
deletion_clock: VectorClock,
|
||||
) {
|
||||
// Check if we already have a tombstone
|
||||
if let Some(existing) = self.tombstones.get(&entity_id) {
|
||||
// Only update if the new deletion is later
|
||||
// (new deletion happened-after existing = existing happened-before new)
|
||||
if existing.deletion_clock.happened_before(&deletion_clock) {
|
||||
self.tombstones.insert(
|
||||
entity_id,
|
||||
Tombstone {
|
||||
entity_id,
|
||||
deleting_node,
|
||||
deletion_clock,
|
||||
timestamp: std::time::Instant::now(),
|
||||
},
|
||||
);
|
||||
debug!("Updated tombstone for entity {:?}", entity_id);
|
||||
} else {
|
||||
debug!(
|
||||
"Ignoring older or concurrent deletion for entity {:?}",
|
||||
entity_id
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// New tombstone
|
||||
self.tombstones.insert(
|
||||
entity_id,
|
||||
Tombstone {
|
||||
entity_id,
|
||||
deleting_node,
|
||||
deletion_clock,
|
||||
timestamp: std::time::Instant::now(),
|
||||
},
|
||||
);
|
||||
info!("Created tombstone for entity {:?}", entity_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an operation should be ignored because the entity is deleted
|
||||
///
|
||||
/// Returns true if:
|
||||
/// - The entity has a tombstone AND
|
||||
/// - The operation's clock happened-before or is concurrent with the
|
||||
/// deletion
|
||||
///
|
||||
/// This prevents operations on deleted entities from being applied.
|
||||
pub fn should_ignore_operation(
|
||||
&self,
|
||||
entity_id: uuid::Uuid,
|
||||
operation_clock: &VectorClock,
|
||||
) -> bool {
|
||||
if let Some(tombstone) = self.tombstones.get(&entity_id) {
|
||||
// If operation happened-before deletion, ignore it
|
||||
// operation_clock.happened_before(deletion_clock) => ignore
|
||||
|
||||
// If deletion happened-before operation, don't ignore (resurrection)
|
||||
// deletion_clock.happened_before(operation_clock) => don't ignore
|
||||
|
||||
// If concurrent, deletion wins (delete bias) => ignore
|
||||
// !operation_clock.happened_before(deletion_clock) &&
|
||||
// !deletion_clock.happened_before(operation_clock) => ignore
|
||||
|
||||
// So we DON'T ignore only if deletion happened-before operation
|
||||
!tombstone.deletion_clock.happened_before(operation_clock)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove old tombstones that are past their TTL
|
||||
///
|
||||
/// This should be called periodically to prevent unbounded growth.
|
||||
pub fn garbage_collect(&mut self) {
|
||||
let ttl = std::time::Duration::from_secs(TOMBSTONE_TTL_SECS);
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
let before_count = self.tombstones.len();
|
||||
|
||||
self.tombstones
|
||||
.retain(|_, tombstone| now.duration_since(tombstone.timestamp) < ttl);
|
||||
|
||||
let after_count = self.tombstones.len();
|
||||
|
||||
if before_count != after_count {
|
||||
info!(
|
||||
"Garbage collected {} tombstones ({} -> {})",
|
||||
before_count - after_count,
|
||||
before_count,
|
||||
after_count
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the number of tombstones
|
||||
pub fn num_tombstones(&self) -> usize {
|
||||
self.tombstones.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// System to handle entity deletions initiated locally
///
/// This system watches for entities with the `ToDelete` marker component
/// and generates Delete operations for them.
///
/// # Usage
///
/// To delete an entity, add the `ToDelete` component:
///
/// ```no_run
/// use bevy::prelude::*;
/// use libmarathon::networking::ToDelete;
///
/// fn delete_entity_system(mut commands: Commands, entity: Entity) {
///     commands.entity(entity).insert(ToDelete);
/// }
/// ```
#[derive(Component)]
pub struct ToDelete;
|
||||
|
||||
/// System that processes entities marked with [`ToDelete`]
///
/// For each marked networked entity it: ticks the local vector clock,
/// builds a Delete operation, records a tombstone, appends the delta to the
/// operation log (when that resource exists), broadcasts it over the gossip
/// bridge, and finally despawns the entity locally.
///
/// Does nothing when no [`GossipBridge`] resource is available — marked
/// entities are left in place until the bridge exists. The entity is
/// despawned even if the broadcast fails (the failure is only logged).
pub fn handle_local_deletions_system(
    mut commands: Commands,
    query: Query<(Entity, &crate::networking::NetworkedEntity), With<ToDelete>>,
    mut node_clock: ResMut<NodeVectorClock>,
    mut tombstone_registry: ResMut<TombstoneRegistry>,
    mut operation_log: Option<ResMut<crate::networking::OperationLog>>,
    bridge: Option<Res<GossipBridge>>,
) {
    // Without a bridge we cannot propagate deletions; bail out entirely.
    let Some(bridge) = bridge else {
        return;
    };

    for (entity, networked) in query.iter() {
        // Increment clock for deletion (once per deleted entity, so each
        // deletion gets its own distinct logical timestamp)
        node_clock.tick();

        // Create Delete operation
        let delete_op = crate::networking::ComponentOpBuilder::new(
            node_clock.node_id,
            node_clock.clock.clone(),
        )
        .delete();

        // Record tombstone (keyed by the stable network id, not the local
        // Entity id, so it survives despawn and is meaningful to peers)
        tombstone_registry.record_deletion(
            networked.network_id,
            node_clock.node_id,
            node_clock.clock.clone(),
        );

        // Create EntityDelta with Delete operation
        let delta = crate::networking::EntityDelta::new(
            networked.network_id,
            node_clock.node_id,
            node_clock.clock.clone(),
            vec![delete_op],
        );

        // Record in operation log (optional resource — skipped when absent)
        if let Some(ref mut log) = operation_log {
            log.record_operation(delta.clone());
        }

        // Broadcast deletion
        let message =
            crate::networking::VersionedMessage::new(crate::networking::SyncMessage::EntityDelta {
                entity_id: delta.entity_id,
                node_id: delta.node_id,
                vector_clock: delta.vector_clock.clone(),
                operations: delta.operations.clone(),
            });

        if let Err(e) = bridge.send(message) {
            error!("Failed to broadcast Delete operation: {}", e);
        } else {
            info!(
                "Broadcast Delete operation for entity {:?}",
                networked.network_id
            );
        }

        // Despawn the entity locally
        commands.entity(entity).despawn();
    }
}
|
||||
|
||||
/// System to garbage collect old tombstones
|
||||
///
|
||||
/// This runs periodically to remove tombstones that are past their TTL.
|
||||
pub fn garbage_collect_tombstones_system(
|
||||
mut tombstone_registry: ResMut<TombstoneRegistry>,
|
||||
time: Res<Time>,
|
||||
mut last_gc: Local<f32>,
|
||||
) {
|
||||
// Garbage collect every 5 minutes
|
||||
const GC_INTERVAL: f32 = 300.0;
|
||||
|
||||
*last_gc += time.delta_secs();
|
||||
|
||||
if *last_gc >= GC_INTERVAL {
|
||||
*last_gc = 0.0;
|
||||
|
||||
debug!("Running tombstone garbage collection");
|
||||
tombstone_registry.garbage_collect();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh registry starts with no tombstones.
    #[test]
    fn test_tombstone_registry_creation() {
        let registry = TombstoneRegistry::new();
        assert_eq!(registry.num_tombstones(), 0);
    }

    // Recording a deletion marks the entity deleted and stores one tombstone.
    #[test]
    fn test_record_deletion() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        registry.record_deletion(entity_id, node_id, clock);

        assert!(registry.is_deleted(entity_id));
        assert_eq!(registry.num_tombstones(), 1);
    }

    // Operations that happened-before the deletion must be rejected.
    #[test]
    fn test_should_ignore_older_operation() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create deletion at clock = 2
        let mut deletion_clock = VectorClock::new();
        deletion_clock.increment(node_id);
        deletion_clock.increment(node_id);

        registry.record_deletion(entity_id, node_id, deletion_clock);

        // Operation at clock = 1 should be ignored
        let mut old_operation_clock = VectorClock::new();
        old_operation_clock.increment(node_id);

        assert!(registry.should_ignore_operation(entity_id, &old_operation_clock));
    }

    // Operations causally after the deletion are a deliberate resurrection
    // and must be allowed through.
    #[test]
    fn test_should_not_ignore_newer_operation() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create deletion at clock = 1
        let mut deletion_clock = VectorClock::new();
        deletion_clock.increment(node_id);

        registry.record_deletion(entity_id, node_id, deletion_clock);

        // Operation at clock = 2 should NOT be ignored (resurrection)
        let mut new_operation_clock = VectorClock::new();
        new_operation_clock.increment(node_id);
        new_operation_clock.increment(node_id);

        assert!(!registry.should_ignore_operation(entity_id, &new_operation_clock));
    }

    // Delete bias: an operation concurrent with the deletion loses.
    #[test]
    fn test_concurrent_delete_wins() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        // Node 1 deletes
        let mut delete_clock = VectorClock::new();
        delete_clock.increment(node1);

        registry.record_deletion(entity_id, node1, delete_clock);

        // Node 2 has concurrent operation
        let mut concurrent_clock = VectorClock::new();
        concurrent_clock.increment(node2);

        // Concurrent operation should be ignored (delete bias)
        assert!(registry.should_ignore_operation(entity_id, &concurrent_clock));
    }

    // A causally-later deletion replaces the stored tombstone.
    #[test]
    fn test_update_tombstone_with_later_deletion() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // First deletion at clock = 1
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock1.clone());

        let tombstone1 = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone1.deletion_clock, clock1);

        // Second deletion at clock = 2 (later)
        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock2.clone());

        let tombstone2 = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone2.deletion_clock, clock2);
    }

    // An older deletion must NOT roll back an existing newer tombstone.
    #[test]
    fn test_ignore_older_tombstone_update() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // First deletion at clock = 2
        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock2.clone());

        // Try to record older deletion at clock = 1
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock1);

        // Should still have the newer tombstone
        let tombstone = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone.deletion_clock, clock2);
    }
}
|
||||
459
crates/libmarathon/src/networking/vector_clock.rs
Normal file
459
crates/libmarathon/src/networking/vector_clock.rs
Normal file
@@ -0,0 +1,459 @@
|
||||
//! Vector clock implementation for distributed causality tracking
|
||||
//!
|
||||
//! Vector clocks allow us to determine the causal relationship between events
|
||||
//! in a distributed system. This is critical for CRDT merge semantics.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::networking::error::{
|
||||
NetworkingError,
|
||||
Result,
|
||||
};
|
||||
|
||||
/// Unique identifier for a node in the distributed system
pub type NodeId = uuid::Uuid;

/// Vector clock for tracking causality in distributed operations
///
/// A vector clock is a map from node IDs to logical timestamps (sequence
/// numbers). Each node maintains its own vector clock and increments its own
/// counter for each local operation. A node absent from the map is treated
/// as having counter 0.
///
/// # Causal Relationships
///
/// Given two vector clocks A and B:
/// - **A happened-before B** if all of A's counters ≤ B's counters and at least
///   one is <
/// - **A and B are concurrent** if neither happened-before the other
/// - **A and B are identical** if all counters are equal
///
/// # Example
///
/// ```
/// use libmarathon::networking::VectorClock;
/// use uuid::Uuid;
///
/// let node1 = Uuid::new_v4();
/// let node2 = Uuid::new_v4();
///
/// let mut clock1 = VectorClock::new();
/// clock1.increment(node1); // node1: 1
///
/// let mut clock2 = VectorClock::new();
/// clock2.increment(node2); // node2: 1
///
/// // These are concurrent - neither happened before the other
/// assert!(clock1.is_concurrent_with(&clock2));
///
/// // Merge the clocks
/// clock1.merge(&clock2); // node1: 1, node2: 1
/// assert!(clock1.happened_before(&clock2) == false);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct VectorClock {
    /// Map from node ID to logical timestamp
    pub clocks: HashMap<NodeId, u64>,
}
|
||||
|
||||
impl VectorClock {
|
||||
/// Create a new empty vector clock
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
clocks: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment the clock for a given node
|
||||
///
|
||||
/// This should be called by a node before performing a local operation.
|
||||
/// It increments that node's counter in the vector clock.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::VectorClock;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
/// let mut clock = VectorClock::new();
|
||||
///
|
||||
/// clock.increment(node);
|
||||
/// assert_eq!(clock.get(node), 1);
|
||||
///
|
||||
/// clock.increment(node);
|
||||
/// assert_eq!(clock.get(node), 2);
|
||||
/// ```
|
||||
pub fn increment(&mut self, node_id: NodeId) -> u64 {
|
||||
let counter = self.clocks.entry(node_id).or_insert(0);
|
||||
*counter += 1;
|
||||
*counter
|
||||
}
|
||||
|
||||
/// Get the current counter value for a node
|
||||
///
|
||||
/// Returns 0 if the node has never been seen in this vector clock.
|
||||
pub fn get(&self, node_id: NodeId) -> u64 {
|
||||
self.clocks.get(&node_id).copied().unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Merge another vector clock into this one
|
||||
///
|
||||
/// Takes the maximum counter value for each node. This is used when
|
||||
/// receiving a message to update our knowledge of remote operations.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::VectorClock;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node1 = Uuid::new_v4();
|
||||
/// let node2 = Uuid::new_v4();
|
||||
///
|
||||
/// let mut clock1 = VectorClock::new();
|
||||
/// clock1.increment(node1); // node1: 1
|
||||
/// clock1.increment(node1); // node1: 2
|
||||
///
|
||||
/// let mut clock2 = VectorClock::new();
|
||||
/// clock2.increment(node2); // node2: 1
|
||||
///
|
||||
/// clock1.merge(&clock2);
|
||||
/// assert_eq!(clock1.get(node1), 2);
|
||||
/// assert_eq!(clock1.get(node2), 1);
|
||||
/// ```
|
||||
pub fn merge(&mut self, other: &VectorClock) {
|
||||
for (node_id, &counter) in &other.clocks {
|
||||
let current = self.clocks.entry(*node_id).or_insert(0);
|
||||
*current = (*current).max(counter);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this vector clock happened-before another
|
||||
///
|
||||
/// Returns true if all of our counters are ≤ the other's counters,
|
||||
/// and at least one is strictly less.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::VectorClock;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node = Uuid::new_v4();
|
||||
///
|
||||
/// let mut clock1 = VectorClock::new();
|
||||
/// clock1.increment(node); // node: 1
|
||||
///
|
||||
/// let mut clock2 = VectorClock::new();
|
||||
/// clock2.increment(node); // node: 1
|
||||
/// clock2.increment(node); // node: 2
|
||||
///
|
||||
/// assert!(clock1.happened_before(&clock2));
|
||||
/// assert!(!clock2.happened_before(&clock1));
|
||||
/// ```
|
||||
pub fn happened_before(&self, other: &VectorClock) -> bool {
|
||||
// Single-pass optimization: check both conditions simultaneously
|
||||
let mut any_strictly_less = false;
|
||||
|
||||
// Check our nodes in a single pass
|
||||
for (node_id, &our_counter) in &self.clocks {
|
||||
let their_counter = other.get(*node_id);
|
||||
|
||||
// Early exit if we have a counter greater than theirs
|
||||
if our_counter > their_counter {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Track if any counter is strictly less
|
||||
if our_counter < their_counter {
|
||||
any_strictly_less = true;
|
||||
}
|
||||
}
|
||||
|
||||
// If we haven't found a strictly less counter yet, check if they have
|
||||
// nodes we don't know about with non-zero values (those count as strictly less)
|
||||
if !any_strictly_less {
|
||||
any_strictly_less = other.clocks.iter().any(|(node_id, &their_counter)| {
|
||||
!self.clocks.contains_key(node_id) && their_counter > 0
|
||||
});
|
||||
}
|
||||
|
||||
any_strictly_less
|
||||
}
|
||||
|
||||
/// Check if this vector clock is concurrent with another
|
||||
///
|
||||
/// Two clocks are concurrent if neither happened-before the other and they
|
||||
/// are not identical. This means the operations are causally independent
|
||||
/// and need CRDT merge semantics.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use libmarathon::networking::VectorClock;
|
||||
/// use uuid::Uuid;
|
||||
///
|
||||
/// let node1 = Uuid::new_v4();
|
||||
/// let node2 = Uuid::new_v4();
|
||||
///
|
||||
/// let mut clock1 = VectorClock::new();
|
||||
/// clock1.increment(node1); // node1: 1
|
||||
///
|
||||
/// let mut clock2 = VectorClock::new();
|
||||
/// clock2.increment(node2); // node2: 1
|
||||
///
|
||||
/// assert!(clock1.is_concurrent_with(&clock2));
|
||||
/// assert!(clock2.is_concurrent_with(&clock1));
|
||||
/// ```
|
||||
pub fn is_concurrent_with(&self, other: &VectorClock) -> bool {
|
||||
// Identical clocks are not concurrent
|
||||
if self == other {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Concurrent if neither happened-before the other
|
||||
!self.happened_before(other) && !other.happened_before(self)
|
||||
}
|
||||
|
||||
/// Compare two vector clocks
|
||||
///
|
||||
/// Returns:
|
||||
/// - `Ordering::Less` if self happened-before other
|
||||
/// - `Ordering::Greater` if other happened-before self
|
||||
/// - `Ordering::Equal` if they are identical
|
||||
/// - `Err` if they are concurrent
|
||||
pub fn compare(&self, other: &VectorClock) -> Result<std::cmp::Ordering> {
|
||||
if self == other {
|
||||
return Ok(std::cmp::Ordering::Equal);
|
||||
}
|
||||
|
||||
if self.happened_before(other) {
|
||||
return Ok(std::cmp::Ordering::Less);
|
||||
}
|
||||
|
||||
if other.happened_before(self) {
|
||||
return Ok(std::cmp::Ordering::Greater);
|
||||
}
|
||||
|
||||
Err(NetworkingError::VectorClockError(
|
||||
"Clocks are concurrent".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // A new clock carries no entries at all.
    #[test]
    fn test_new_clock() {
        let clock = VectorClock::new();
        assert_eq!(clock.clocks.len(), 0);
    }

    // increment returns the new counter value and updates get().
    #[test]
    fn test_increment() {
        let node = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();

        assert_eq!(clock.increment(node), 1);
        assert_eq!(clock.get(node), 1);

        assert_eq!(clock.increment(node), 2);
        assert_eq!(clock.get(node), 2);
    }

    // Unknown nodes read as 0.
    #[test]
    fn test_get_unknown_node() {
        let clock = VectorClock::new();
        let node = uuid::Uuid::new_v4();

        assert_eq!(clock.get(node), 0);
    }

    // merge is a union: entries from both clocks survive.
    #[test]
    fn test_merge() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);
        clock1.increment(node1);

        let mut clock2 = VectorClock::new();
        clock2.increment(node2);

        clock1.merge(&clock2);

        assert_eq!(clock1.get(node1), 2);
        assert_eq!(clock1.get(node2), 1);
    }

    // merge takes the per-node maximum, never a sum.
    #[test]
    fn test_merge_takes_max() {
        let node = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node);

        let mut clock2 = VectorClock::new();
        clock2.increment(node);
        clock2.increment(node);

        clock1.merge(&clock2);
        assert_eq!(clock1.get(node), 2);
    }

    // Strictly smaller counter on the same node => happened-before.
    #[test]
    fn test_happened_before() {
        let node = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node);

        let mut clock2 = VectorClock::new();
        clock2.increment(node);
        clock2.increment(node);

        assert!(clock1.happened_before(&clock2));
        assert!(!clock2.happened_before(&clock1));
    }

    // happened-before also holds when the later clock knows extra nodes.
    #[test]
    fn test_happened_before_multiple_nodes() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        let mut clock2 = VectorClock::new();
        clock2.increment(node1);
        clock2.increment(node2);

        assert!(clock1.happened_before(&clock2));
        assert!(!clock2.happened_before(&clock1));
    }

    // Clocks advanced on unrelated nodes are concurrent, symmetrically.
    #[test]
    fn test_concurrent() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        let mut clock2 = VectorClock::new();
        clock2.increment(node2);

        assert!(clock1.is_concurrent_with(&clock2));
        assert!(clock2.is_concurrent_with(&clock1));
    }

    #[test]
    fn test_happened_before_with_disjoint_nodes() {
        // Critical test case: clocks with completely different nodes are concurrent,
        // not happened-before. This test would fail with the old buggy implementation.
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1); // {node1: 1}

        let mut clock2 = VectorClock::new();
        clock2.increment(node2); // {node2: 1}

        // These clocks are concurrent - neither happened before the other
        assert!(!clock1.happened_before(&clock2));
        assert!(!clock2.happened_before(&clock1));
        assert!(clock1.is_concurrent_with(&clock2));
    }

    #[test]
    fn test_happened_before_with_superset_nodes() {
        // When one clock has all nodes from another PLUS more nodes,
        // the smaller clock happened-before the larger one
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1); // {node1: 1}

        let mut clock2 = VectorClock::new();
        clock2.increment(node1); // node1: 1
        clock2.increment(node2); // node2: 1 => clock2 is {node1: 1, node2: 1}

        // clock1 happened before clock2
        assert!(clock1.happened_before(&clock2));
        assert!(!clock2.happened_before(&clock1));
        assert!(!clock1.is_concurrent_with(&clock2));
    }

    // Identical clocks are neither ordered nor concurrent.
    #[test]
    fn test_identical_clocks() {
        let node = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node);

        let mut clock2 = VectorClock::new();
        clock2.increment(node);

        assert_eq!(clock1, clock2);
        assert!(!clock1.happened_before(&clock2));
        assert!(!clock2.happened_before(&clock1));
        assert!(!clock1.is_concurrent_with(&clock2));
    }

    // compare maps the three ordered outcomes to Ordering variants.
    #[test]
    fn test_compare() {
        let node = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node);

        let mut clock2 = VectorClock::new();
        clock2.increment(node);
        clock2.increment(node);

        assert_eq!(clock1.compare(&clock2).unwrap(), std::cmp::Ordering::Less);
        assert_eq!(
            clock2.compare(&clock1).unwrap(),
            std::cmp::Ordering::Greater
        );
        assert_eq!(clock1.compare(&clock1).unwrap(), std::cmp::Ordering::Equal);
    }

    // Concurrent clocks have no total order: compare must error.
    #[test]
    fn test_compare_concurrent() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        let mut clock2 = VectorClock::new();
        clock2.increment(node2);

        assert!(clock1.compare(&clock2).is_err());
    }

    // Round-trip through bincode preserves the clock exactly.
    #[test]
    fn test_serialization() -> bincode::Result<()> {
        let node = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();
        clock.increment(node);

        let bytes = bincode::serialize(&clock)?;
        let deserialized: VectorClock = bincode::deserialize(&bytes)?;

        assert_eq!(clock, deserialized);

        Ok(())
    }
}
|
||||
253
crates/libmarathon/src/persistence/config.rs
Normal file
253
crates/libmarathon/src/persistence/config.rs
Normal file
@@ -0,0 +1,253 @@
|
||||
//! Configuration for the persistence layer
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::persistence::error::Result;
|
||||
|
||||
/// Default critical flush delay in milliseconds
const DEFAULT_CRITICAL_FLUSH_DELAY_MS: u64 = 1000;

/// Default maximum buffer operations before forced flush
const DEFAULT_MAX_BUFFER_OPERATIONS: usize = 1000;

/// Configuration for the persistence layer
///
/// Registered as a Bevy resource; serde-serializable so it can be loaded
/// from a config file.
#[derive(Debug, Clone, Serialize, Deserialize, bevy::prelude::Resource)]
pub struct PersistenceConfig {
    /// Base flush interval in seconds (may be adjusted by battery level
    /// when `battery_adaptive` is set — see `get_flush_interval`)
    pub flush_interval_secs: u64,

    /// Max time (in milliseconds) to defer critical writes (entity
    /// creation, etc.)
    pub critical_flush_delay_ms: u64,

    /// WAL checkpoint interval in seconds
    pub checkpoint_interval_secs: u64,

    /// Max WAL size before forced checkpoint (in bytes)
    pub max_wal_size_bytes: usize,

    /// Maximum number of operations in write buffer before forcing flush
    pub max_buffer_operations: usize,

    /// Enable adaptive flushing based on battery
    pub battery_adaptive: bool,

    /// Battery tier configuration (intervals per charge level)
    pub battery_tiers: BatteryTiers,

    /// Platform-specific settings; `serde(default)` keeps older config
    /// files without this key deserializable
    #[serde(default)]
    pub platform: PlatformConfig,
}
|
||||
|
||||
impl Default for PersistenceConfig {
    /// Conservative defaults: adaptive flushing on, 10 s base interval.
    fn default() -> Self {
        Self {
            flush_interval_secs: 10,
            critical_flush_delay_ms: DEFAULT_CRITICAL_FLUSH_DELAY_MS,
            checkpoint_interval_secs: 30,
            max_wal_size_bytes: 5 * 1024 * 1024, // 5MB
            max_buffer_operations: DEFAULT_MAX_BUFFER_OPERATIONS,
            battery_adaptive: true,
            battery_tiers: BatteryTiers::default(),
            platform: PlatformConfig::default(),
        }
    }
}
|
||||
|
||||
impl PersistenceConfig {
|
||||
/// Get the flush interval based on battery status
|
||||
pub fn get_flush_interval(&self, battery_level: f32, is_charging: bool) -> Duration {
|
||||
if !self.battery_adaptive {
|
||||
return Duration::from_secs(self.flush_interval_secs);
|
||||
}
|
||||
|
||||
let interval_secs = if is_charging {
|
||||
self.battery_tiers.charging
|
||||
} else if battery_level > 0.5 {
|
||||
self.battery_tiers.high
|
||||
} else if battery_level > 0.2 {
|
||||
self.battery_tiers.medium
|
||||
} else {
|
||||
self.battery_tiers.low
|
||||
};
|
||||
|
||||
Duration::from_secs(interval_secs)
|
||||
}
|
||||
|
||||
/// Get the critical flush delay
|
||||
pub fn get_critical_flush_delay(&self) -> Duration {
|
||||
Duration::from_millis(self.critical_flush_delay_ms)
|
||||
}
|
||||
|
||||
/// Get the checkpoint interval
|
||||
pub fn get_checkpoint_interval(&self) -> Duration {
|
||||
Duration::from_secs(self.checkpoint_interval_secs)
|
||||
}
|
||||
}
|
||||
|
||||
/// Battery tier flush intervals (in seconds)
///
/// Consulted by `PersistenceConfig::get_flush_interval` when
/// `battery_adaptive` is enabled; the charging tier takes precedence over
/// the level-based tiers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatteryTiers {
    /// Flush interval when charging (seconds)
    pub charging: u64,

    /// Flush interval when battery > 50% (seconds)
    pub high: u64,

    /// Flush interval when battery 20-50% (seconds)
    pub medium: u64,

    /// Flush interval when battery < 20% (seconds)
    pub low: u64,
}
|
||||
|
||||
impl Default for BatteryTiers {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
charging: 5,
|
||||
high: 10,
|
||||
medium: 30,
|
||||
low: 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Platform-specific configuration
///
/// Currently only carries iOS settings; `#[serde(default)]` on each field
/// keeps older TOML files loadable as new platforms are added.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PlatformConfig {
    /// iOS-specific settings
    #[serde(default)]
    pub ios: IosConfig,
}
|
||||
|
||||
/// iOS-specific configuration
///
/// Tuning knobs for iOS app-lifecycle constraints (limited background
/// execution time and Low Power Mode).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IosConfig {
    /// How long to wait for background flush before giving up (seconds)
    pub background_flush_timeout_secs: u64,

    /// Flush interval when in low power mode (seconds)
    pub low_power_mode_interval_secs: u64,
}
|
||||
|
||||
impl Default for IosConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
background_flush_timeout_secs: 5,
|
||||
low_power_mode_interval_secs: 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Load persistence configuration from a TOML string
|
||||
///
|
||||
/// Parses TOML configuration and validates all settings. Use this for
|
||||
/// loading configuration from embedded strings or dynamic sources.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `toml`: TOML-formatted configuration string
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(PersistenceConfig)`: Parsed and validated configuration
|
||||
/// - `Err`: If TOML is invalid or contains invalid values
|
||||
///
|
||||
/// # Example TOML
|
||||
/// ```toml
|
||||
/// flush_interval_secs = 10
|
||||
/// battery_adaptive = true
|
||||
/// [battery_tiers]
|
||||
/// charging = 5
|
||||
/// high = 10
|
||||
/// ```
|
||||
pub fn load_config_from_str(toml: &str) -> Result<PersistenceConfig> {
|
||||
Ok(toml::from_str(toml)?)
|
||||
}
|
||||
|
||||
/// Load persistence configuration from a TOML file
|
||||
///
|
||||
/// Reads and parses a TOML configuration file. This is the recommended way
|
||||
/// to load configuration for production use, allowing runtime configuration
|
||||
/// changes without recompilation.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `path`: Path to TOML configuration file
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(PersistenceConfig)`: Loaded configuration
|
||||
/// - `Err`: If file can't be read or TOML is invalid
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// # fn example() -> Result<()> {
|
||||
/// let config = load_config_from_file("persistence.toml")?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn load_config_from_file(path: impl AsRef<std::path::Path>) -> Result<PersistenceConfig> {
|
||||
let content = std::fs::read_to_string(path)?;
|
||||
Ok(load_config_from_str(&content)?)
|
||||
}
|
||||
|
||||
/// Serialize persistence configuration to a TOML string
|
||||
///
|
||||
/// Converts configuration to human-readable TOML format. Use this to
|
||||
/// save configuration to files or display current settings.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `config`: Configuration to serialize
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(String)`: Pretty-printed TOML configuration
|
||||
/// - `Err`: If serialization fails (rare)
|
||||
pub fn save_config_to_str(config: &PersistenceConfig) -> Result<String> {
|
||||
Ok(toml::to_string_pretty(config)?)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Defaults must match the documented baseline values.
    #[test]
    fn test_default_config() {
        let config = PersistenceConfig::default();
        assert_eq!(config.flush_interval_secs, 10);
        // Use assert! for booleans instead of comparing against `true`
        // (clippy: bool_assert_comparison).
        assert!(config.battery_adaptive);
    }

    /// Each battery tier must map to its configured interval.
    #[test]
    fn test_battery_adaptive_intervals() {
        let config = PersistenceConfig::default();

        // Charging
        let interval = config.get_flush_interval(0.3, true);
        assert_eq!(interval, Duration::from_secs(5));

        // High battery
        let interval = config.get_flush_interval(0.8, false);
        assert_eq!(interval, Duration::from_secs(10));

        // Medium battery
        let interval = config.get_flush_interval(0.4, false);
        assert_eq!(interval, Duration::from_secs(30));

        // Low battery
        let interval = config.get_flush_interval(0.1, false);
        assert_eq!(interval, Duration::from_secs(60));
    }

    /// Round-tripping through TOML must preserve the settings.
    #[test]
    fn test_config_serialization() {
        let config = PersistenceConfig::default();
        let toml = save_config_to_str(&config).unwrap();
        let loaded = load_config_from_str(&toml).unwrap();

        assert_eq!(config.flush_interval_secs, loaded.flush_interval_secs);
        assert_eq!(config.battery_adaptive, loaded.battery_adaptive);
    }
}
|
||||
716
crates/libmarathon/src/persistence/database.rs
Normal file
716
crates/libmarathon/src/persistence/database.rs
Normal file
@@ -0,0 +1,716 @@
|
||||
//! Database schema and operations for persistence layer
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use chrono::Utc;
|
||||
use rusqlite::{
|
||||
Connection,
|
||||
OptionalExtension,
|
||||
};
|
||||
|
||||
use crate::persistence::{
|
||||
error::{
|
||||
PersistenceError,
|
||||
Result,
|
||||
},
|
||||
types::*,
|
||||
};
|
||||
|
||||
/// Default SQLite page size in bytes (4KB)
|
||||
const DEFAULT_PAGE_SIZE: i64 = 4096;
|
||||
|
||||
/// Cache size for SQLite in KB (negative value = KB instead of pages)
|
||||
const CACHE_SIZE_KB: i64 = -20000; // 20MB
|
||||
|
||||
/// Get current Unix timestamp in seconds
|
||||
///
|
||||
/// Helper to avoid repeating `Utc::now().timestamp()` throughout the code
|
||||
#[inline]
|
||||
fn current_timestamp() -> i64 {
|
||||
Utc::now().timestamp()
|
||||
}
|
||||
|
||||
/// Initialize SQLite connection with WAL mode and optimizations
|
||||
pub fn initialize_persistence_db<P: AsRef<Path>>(path: P) -> Result<Connection> {
|
||||
let mut conn = Connection::open(path)?;
|
||||
|
||||
configure_sqlite_for_persistence(&conn)?;
|
||||
|
||||
// Run migrations to ensure schema is up to date
|
||||
crate::persistence::run_migrations(&mut conn)?;
|
||||
|
||||
Ok(conn)
|
||||
}
|
||||
|
||||
/// Configure SQLite with WAL mode and battery-friendly settings
///
/// The PRAGMA order matters: WAL mode is enabled first so the subsequent
/// `wal_autocheckpoint` setting applies to the WAL journal.
/// Safe to call on both fresh and existing databases.
pub fn configure_sqlite_for_persistence(conn: &Connection) -> Result<()> {
    // Enable Write-Ahead Logging for better concurrency and fewer fsyncs
    conn.execute_batch("PRAGMA journal_mode = WAL;")?;

    // Don't auto-checkpoint on every transaction - we'll control this manually
    // (see `checkpoint_wal`); 0 disables SQLite's automatic checkpointing.
    conn.execute_batch("PRAGMA wal_autocheckpoint = 0;")?;

    // NORMAL synchronous mode - fsync WAL on commit, but not every write
    // This is a good balance between durability and performance
    conn.execute_batch("PRAGMA synchronous = NORMAL;")?;

    // Larger page size for better sequential write performance on mobile
    // Note: This must be set before the database is created or after VACUUM
    // We'll skip setting it if database already exists to avoid issues
    let page_size: i64 = conn.query_row("PRAGMA page_size", [], |row| row.get(0))?;
    if page_size == DEFAULT_PAGE_SIZE {
        // Try to set larger page size, but only if we're at default
        // This will only work on a fresh database; errors are deliberately
        // ignored since an existing database keeps its current page size.
        let _ = conn.execute_batch("PRAGMA page_size = 8192;");
    }

    // Increase cache size for better performance (in pages, negative = KB)
    conn.execute_batch(&format!("PRAGMA cache_size = {};", CACHE_SIZE_KB))?;

    // Use memory for temp tables (faster, we don't need temp table durability)
    conn.execute_batch("PRAGMA temp_store = MEMORY;")?;

    Ok(())
}
|
||||
|
||||
/// Create the database schema for persistence
///
/// Idempotent: every statement uses `IF NOT EXISTS` / `INSERT OR IGNORE`,
/// so calling this on an already-initialised database is a no-op.
///
/// # Errors
/// Returns an error if any DDL statement fails.
pub fn create_persistence_schema(conn: &Connection) -> Result<()> {
    // Entities table - stores entity metadata
    // (id is a UUID stored as raw bytes; timestamps are Unix seconds)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS entities (
            id BLOB PRIMARY KEY,
            entity_type TEXT NOT NULL,
            created_at INTEGER NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;

    // Components table - stores serialized component data
    // Cascade delete keeps components from outliving their entity.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS components (
            entity_id BLOB NOT NULL,
            component_type TEXT NOT NULL,
            data BLOB NOT NULL,
            updated_at INTEGER NOT NULL,
            PRIMARY KEY (entity_id, component_type),
            FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
        )",
        [],
    )?;

    // Index for querying components by entity
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_components_entity
         ON components(entity_id)",
        [],
    )?;

    // Operation log - for CRDT sync protocol
    // (node_id, sequence_number) is unique so replayed operations dedupe.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS operation_log (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            node_id TEXT NOT NULL,
            sequence_number INTEGER NOT NULL,
            operation BLOB NOT NULL,
            timestamp INTEGER NOT NULL,
            UNIQUE(node_id, sequence_number)
        )",
        [],
    )?;

    // Index for efficient operation log queries
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_oplog_node_seq
         ON operation_log(node_id, sequence_number)",
        [],
    )?;

    // Vector clock table - for causality tracking
    conn.execute(
        "CREATE TABLE IF NOT EXISTS vector_clock (
            node_id TEXT PRIMARY KEY,
            counter INTEGER NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;

    // Session state table - for crash detection
    // (simple key/value store; see set_session_state / get_session_state)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS session_state (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;

    // WAL checkpoint tracking (single-row table, pinned at rowid 1)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS checkpoint_state (
            last_checkpoint INTEGER NOT NULL,
            wal_size_bytes INTEGER NOT NULL
        )",
        [],
    )?;

    // Initialize checkpoint state if not exists
    conn.execute(
        "INSERT OR IGNORE INTO checkpoint_state (rowid, last_checkpoint, wal_size_bytes)
         VALUES (1, ?, 0)",
        [current_timestamp()],
    )?;

    Ok(())
}
|
||||
|
||||
/// Flush a batch of operations to SQLite in a single transaction
///
/// All operations are applied atomically: either every op in `ops` lands
/// or none do (the transaction rolls back on the first error via `?`).
///
/// # Returns
/// - `Ok(n)`: the number of operations applied (equals `ops.len()` on success)
/// - `Err`: if any statement or the final commit fails
pub fn flush_to_sqlite(ops: &[PersistenceOp], conn: &mut Connection) -> Result<usize> {
    // Fast path: nothing buffered, avoid opening a transaction at all.
    if ops.is_empty() {
        return Ok(0);
    }

    let tx = conn.transaction()?;
    let mut count = 0;

    for op in ops {
        match op {
            | PersistenceOp::UpsertEntity { id, data } => {
                tx.execute(
                    "INSERT OR REPLACE INTO entities (id, entity_type, created_at, updated_at)
                     VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        id.as_bytes(),
                        data.entity_type,
                        data.created_at.timestamp(),
                        data.updated_at.timestamp(),
                    ],
                )?;
                count += 1;
            },

            | PersistenceOp::UpsertComponent {
                entity_id,
                component_type,
                data,
            } => {
                tx.execute(
                    "INSERT OR REPLACE INTO components (entity_id, component_type, data, updated_at)
                     VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        entity_id.as_bytes(),
                        component_type,
                        data,
                        current_timestamp(),
                    ],
                )?;
                count += 1;
            },

            | PersistenceOp::LogOperation {
                node_id,
                sequence,
                operation,
            } => {
                tx.execute(
                    "INSERT OR REPLACE INTO operation_log (node_id, sequence_number, operation, timestamp)
                     VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        &node_id.to_string(), // Convert UUID to string for SQLite TEXT column
                        sequence,
                        operation,
                        current_timestamp(),
                    ],
                )?;
                count += 1;
            },

            | PersistenceOp::UpdateVectorClock { node_id, counter } => {
                tx.execute(
                    "INSERT OR REPLACE INTO vector_clock (node_id, counter, updated_at)
                     VALUES (?1, ?2, ?3)",
                    rusqlite::params![&node_id.to_string(), counter, current_timestamp()], // Convert UUID to string
                )?;
                count += 1;
            },

            | PersistenceOp::DeleteEntity { id } => {
                // Cascades to the components table via the FK declared in the schema.
                tx.execute(
                    "DELETE FROM entities WHERE id = ?1",
                    rusqlite::params![id.as_bytes()],
                )?;
                count += 1;
            },

            | PersistenceOp::DeleteComponent {
                entity_id,
                component_type,
            } => {
                tx.execute(
                    "DELETE FROM components WHERE entity_id = ?1 AND component_type = ?2",
                    rusqlite::params![entity_id.as_bytes(), component_type],
                )?;
                count += 1;
            },
        }
    }

    tx.commit()?;
    Ok(count)
}
|
||||
|
||||
/// Manually checkpoint the WAL file to merge changes into the main database
|
||||
///
|
||||
/// This function performs a SQLite WAL checkpoint, which copies frames from the
|
||||
/// write-ahead log back into the main database file. This is crucial for:
|
||||
/// - Reducing WAL file size to save disk space
|
||||
/// - Ensuring durability of committed transactions
|
||||
/// - Maintaining database integrity
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `conn`: Mutable reference to the SQLite connection
|
||||
/// - `mode`: Checkpoint mode controlling blocking behavior (see
|
||||
/// [`CheckpointMode`])
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(CheckpointInfo)`: Information about the checkpoint operation
|
||||
/// - `Err`: If the checkpoint fails or database state update fails
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// # use rusqlite::Connection;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// # fn example() -> anyhow::Result<()> {
|
||||
/// let mut conn = Connection::open("app.db")?;
|
||||
/// let info = checkpoint_wal(&mut conn, CheckpointMode::Passive)?;
|
||||
/// if info.busy {
|
||||
/// // Some pages couldn't be checkpointed due to active readers
|
||||
/// }
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn checkpoint_wal(conn: &mut Connection, mode: CheckpointMode) -> Result<CheckpointInfo> {
|
||||
let mode_str = match mode {
|
||||
| CheckpointMode::Passive => "PASSIVE",
|
||||
| CheckpointMode::Full => "FULL",
|
||||
| CheckpointMode::Restart => "RESTART",
|
||||
| CheckpointMode::Truncate => "TRUNCATE",
|
||||
};
|
||||
|
||||
let query = format!("PRAGMA wal_checkpoint({})", mode_str);
|
||||
|
||||
// Returns (busy, log_pages, checkpointed_pages)
|
||||
let (busy, log_pages, checkpointed_pages): (i32, i32, i32) =
|
||||
conn.query_row(&query, [], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})?;
|
||||
|
||||
// Update checkpoint state
|
||||
conn.execute(
|
||||
"UPDATE checkpoint_state SET last_checkpoint = ?1 WHERE rowid = 1",
|
||||
[current_timestamp()],
|
||||
)?;
|
||||
|
||||
Ok(CheckpointInfo {
|
||||
busy: busy != 0,
|
||||
log_pages,
|
||||
checkpointed_pages,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the size of the WAL file in bytes
|
||||
///
|
||||
/// This checks the actual WAL file size on disk without triggering a
|
||||
/// checkpoint. Large WAL files consume disk space and can slow down recovery,
|
||||
/// so monitoring size helps maintain optimal performance.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `conn`: Reference to the SQLite connection
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(i64)`: WAL file size in bytes (0 if no WAL exists or in-memory
|
||||
/// database)
|
||||
/// - `Err`: If the database path query fails
|
||||
///
|
||||
/// # Note
|
||||
/// For in-memory databases, always returns 0.
|
||||
pub fn get_wal_size(conn: &Connection) -> Result<i64> {
|
||||
// Get the database file path
|
||||
let db_path: Option<String> = conn
|
||||
.query_row("PRAGMA database_list", [], |row| row.get::<_, String>(2))
|
||||
.optional()?;
|
||||
|
||||
// If no path (in-memory database), return 0
|
||||
let Some(db_path) = db_path else {
|
||||
return Ok(0);
|
||||
};
|
||||
|
||||
// WAL file has same name as database but with -wal suffix
|
||||
let wal_path = format!("{}-wal", db_path);
|
||||
|
||||
// Check if WAL file exists and get its size
|
||||
match std::fs::metadata(&wal_path) {
|
||||
| Ok(metadata) => Ok(metadata.len() as i64),
|
||||
| Err(_) => Ok(0), // WAL doesn't exist yet
|
||||
}
|
||||
}
|
||||
|
||||
/// Checkpoint mode for WAL
///
/// Mirrors SQLite's `PRAGMA wal_checkpoint(...)` modes; passed to
/// `checkpoint_wal`. Later variants are progressively more aggressive.
#[derive(Debug, Clone, Copy)]
pub enum CheckpointMode {
    /// Passive checkpoint - doesn't block readers/writers
    Passive,
    /// Full checkpoint - waits for writers to finish
    Full,
    /// Restart checkpoint - like Full, but restarts WAL file
    Restart,
    /// Truncate checkpoint - like Restart, but truncates WAL file to 0 bytes
    Truncate,
}
|
||||
|
||||
/// Information about a checkpoint operation
///
/// Values come straight from the row returned by
/// `PRAGMA wal_checkpoint(...)`; see `checkpoint_wal`.
#[derive(Debug)]
pub struct CheckpointInfo {
    /// True if some frames could not be checkpointed (e.g. active readers)
    pub busy: bool,
    /// Number of frames in the WAL log
    pub log_pages: i32,
    /// Number of frames successfully moved into the main database
    pub checkpointed_pages: i32,
}
|
||||
|
||||
/// Set a session state value in the database
///
/// Session state is used to track application lifecycle events and detect
/// crashes. Values persist across restarts, enabling crash detection and
/// recovery.
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection
/// - `key`: State key (e.g., "clean_shutdown", "session_id")
/// - `value`: State value to store
///
/// # Returns
/// - `Ok(())`: State was successfully saved
/// - `Err`: If the database write fails
pub fn set_session_state(conn: &mut Connection, key: &str, value: &str) -> Result<()> {
    // INSERT OR REPLACE: last write wins for a given key.
    conn.execute(
        "INSERT OR REPLACE INTO session_state (key, value, updated_at)
         VALUES (?1, ?2, ?3)",
        rusqlite::params![key, value, current_timestamp()],
    )?;
    Ok(())
}
|
||||
|
||||
/// Get a session state value from the database
|
||||
///
|
||||
/// Retrieves persistent state information stored across application sessions.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `conn`: Reference to the SQLite connection
|
||||
/// - `key`: State key to retrieve
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(Some(value))`: State exists and was retrieved
|
||||
/// - `Ok(None)`: State key doesn't exist
|
||||
/// - `Err`: If the database query fails
|
||||
pub fn get_session_state(conn: &Connection, key: &str) -> Result<Option<String>> {
|
||||
conn.query_row(
|
||||
"SELECT value FROM session_state WHERE key = ?1",
|
||||
rusqlite::params![key],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.optional()
|
||||
.map_err(|e| PersistenceError::Database(e))
|
||||
}
|
||||
|
||||
/// Check if the previous session had a clean shutdown
|
||||
///
|
||||
/// This is critical for crash detection. When the application starts, this
|
||||
/// checks if the previous session ended cleanly. If not, it indicates a crash
|
||||
/// occurred, and recovery procedures may be needed.
|
||||
///
|
||||
/// **Side effect**: Resets the clean_shutdown flag to "false" for the current
|
||||
/// session. Call [`mark_clean_shutdown`] during normal shutdown to set it back
|
||||
/// to "true".
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `conn`: Mutable reference to the SQLite connection (mutates session state)
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(true)`: Previous session shut down cleanly
|
||||
/// - `Ok(false)`: Previous session crashed or this is first run
|
||||
/// - `Err`: If database operations fail
|
||||
pub fn check_clean_shutdown(conn: &mut Connection) -> Result<bool> {
|
||||
let clean = get_session_state(conn, "clean_shutdown")?
|
||||
.map(|v| v == "true")
|
||||
.unwrap_or(false);
|
||||
|
||||
// Reset for this session
|
||||
set_session_state(conn, "clean_shutdown", "false")?;
|
||||
|
||||
Ok(clean)
|
||||
}
|
||||
|
||||
/// Mark the current session as cleanly shut down
///
/// Call this during normal application shutdown to indicate clean termination.
/// The next startup will detect this flag via [`check_clean_shutdown`] and know
/// no crash occurred.
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection
///
/// # Returns
/// - `Ok(())`: Clean shutdown flag was set
/// - `Err`: If the database write fails
pub fn mark_clean_shutdown(conn: &mut Connection) -> Result<()> {
    // Counterpart to the reset performed by check_clean_shutdown().
    set_session_state(conn, "clean_shutdown", "true")
}
|
||||
|
||||
//
|
||||
// ============================================================================
|
||||
// Session Management Operations
|
||||
// ============================================================================
|
||||
//
|
||||
|
||||
/// Save session metadata to database
///
/// Upserts the full session row keyed by the session UUID; an existing row
/// for the same id is replaced wholesale.
///
/// # Errors
/// Returns an error if the database write fails.
pub fn save_session(conn: &mut Connection, session: &crate::networking::Session) -> Result<()> {
    conn.execute(
        "INSERT OR REPLACE INTO sessions (id, code, name, created_at, last_active, entity_count, state, secret)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
        rusqlite::params![
            // UUID stored as raw bytes; the human-readable code stored alongside
            session.id.as_uuid().as_bytes(),
            session.id.to_code(),
            session.name,
            session.created_at,
            session.last_active,
            session.entity_count as i64, // usize -> i64 for SQLite INTEGER
            session.state.to_string(),
            session.secret,
        ],
    )?;
    Ok(())
}
|
||||
|
||||
/// Load session by ID
///
/// Returns `Ok(None)` when no session row exists for the given id.
/// NOTE(review): the row-mapping closure is duplicated in
/// `get_last_active_session`; keep the two in sync if the schema changes.
pub fn load_session(
    conn: &Connection,
    session_id: crate::networking::SessionId,
) -> Result<Option<crate::networking::Session>> {
    conn.query_row(
        "SELECT code, name, created_at, last_active, entity_count, state, secret
         FROM sessions WHERE id = ?1",
        [session_id.as_uuid().as_bytes()],
        |row| {
            let code: String = row.get(0)?;
            let state_str: String = row.get(5)?;
            // Unknown state strings fall back to Created rather than erroring.
            let state = crate::networking::SessionState::from_str(&state_str)
                .unwrap_or(crate::networking::SessionState::Created);

            // Reconstruct SessionId from the stored code
            // (a corrupt code surfaces as InvalidQuery, i.e. a query error)
            let id = crate::networking::SessionId::from_code(&code)
                .map_err(|_| rusqlite::Error::InvalidQuery)?;

            Ok(crate::networking::Session {
                id,
                name: row.get(1)?,
                created_at: row.get(2)?,
                last_active: row.get(3)?,
                entity_count: row.get::<_, i64>(4)? as usize,
                state,
                secret: row.get(6)?,
            })
        },
    )
    .optional()
    .map_err(PersistenceError::from)
}
|
||||
|
||||
/// Get the most recently active session
///
/// Picks the single session row with the greatest `last_active`; returns
/// `Ok(None)` when the table is empty.
/// NOTE(review): row mapping duplicated from `load_session`; keep in sync.
pub fn get_last_active_session(conn: &Connection) -> Result<Option<crate::networking::Session>> {
    conn.query_row(
        "SELECT code, name, created_at, last_active, entity_count, state, secret
         FROM sessions ORDER BY last_active DESC LIMIT 1",
        [],
        |row| {
            let code: String = row.get(0)?;
            let state_str: String = row.get(5)?;
            // Unknown state strings fall back to Created rather than erroring.
            let state = crate::networking::SessionState::from_str(&state_str)
                .unwrap_or(crate::networking::SessionState::Created);

            // Reconstruct SessionId from the stored code
            let id = crate::networking::SessionId::from_code(&code)
                .map_err(|_| rusqlite::Error::InvalidQuery)?;

            Ok(crate::networking::Session {
                id,
                name: row.get(1)?,
                created_at: row.get(2)?,
                last_active: row.get(3)?,
                entity_count: row.get::<_, i64>(4)? as usize,
                state,
                secret: row.get(6)?,
            })
        },
    )
    .optional()
    .map_err(PersistenceError::from)
}
|
||||
|
||||
/// Save session vector clock to database
///
/// Replaces the entire stored clock for the session atomically
/// (delete-then-insert inside one transaction), so readers never observe a
/// partially written clock.
pub fn save_session_vector_clock(
    conn: &mut Connection,
    session_id: crate::networking::SessionId,
    clock: &crate::networking::VectorClock,
) -> Result<()> {
    let tx = conn.transaction()?;

    // Delete old clock entries for this session
    tx.execute(
        "DELETE FROM vector_clock WHERE session_id = ?1",
        [session_id.as_uuid().as_bytes()],
    )?;

    // Insert current clock state, one row per node
    for (node_id, &counter) in &clock.clocks {
        tx.execute(
            "INSERT INTO vector_clock (session_id, node_id, counter, updated_at)
             VALUES (?1, ?2, ?3, ?4)",
            rusqlite::params![
                session_id.as_uuid().as_bytes(),
                node_id.to_string(), // UUID as TEXT
                counter as i64,      // u64 -> i64 for SQLite INTEGER
                current_timestamp(),
            ],
        )?;
    }

    tx.commit()?;
    Ok(())
}
|
||||
|
||||
/// Load session vector clock from database
///
/// Rebuilds the in-memory clock from the per-node rows written by
/// [`save_session_vector_clock`]. An empty result yields an empty clock.
pub fn load_session_vector_clock(
    conn: &Connection,
    session_id: crate::networking::SessionId,
) -> Result<crate::networking::VectorClock> {
    let mut stmt =
        conn.prepare("SELECT node_id, counter FROM vector_clock WHERE session_id = ?1")?;

    let mut clock = crate::networking::VectorClock::new();
    let rows = stmt.query_map([session_id.as_uuid().as_bytes()], |row| {
        let node_id_str: String = row.get(0)?;
        let counter: i64 = row.get(1)?;
        Ok((node_id_str, counter))
    })?;

    for row in rows {
        let (node_id_str, counter) = row?;
        // NOTE(review): rows whose node_id fails to parse as a UUID are
        // silently skipped — presumably deliberate tolerance of old/corrupt
        // rows, but confirm this is intended rather than an error path.
        if let Ok(node_id) = uuid::Uuid::parse_str(&node_id_str) {
            clock.clocks.insert(node_id, counter as u64);
        }
    }

    Ok(clock)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Schema creation should produce all expected tables on a fresh database.
    #[test]
    fn test_database_initialization() -> Result<()> {
        let conn = Connection::open_in_memory()?;
        configure_sqlite_for_persistence(&conn)?;
        create_persistence_schema(&conn)?;

        // Verify tables exist
        let tables: Vec<String> = conn
            .prepare("SELECT name FROM sqlite_master WHERE type='table'")?
            .query_map([], |row| row.get(0))?
            .collect::<std::result::Result<Vec<_>, _>>()?;

        assert!(tables.contains(&"entities".to_string()));
        assert!(tables.contains(&"components".to_string()));
        assert!(tables.contains(&"operation_log".to_string()));
        assert!(tables.contains(&"vector_clock".to_string()));

        Ok(())
    }

    /// Flushing an entity + component pair should write both rows atomically.
    #[test]
    fn test_flush_operations() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;

        let entity_id = uuid::Uuid::new_v4();
        let ops = vec![
            PersistenceOp::UpsertEntity {
                id: entity_id,
                data: EntityData {
                    id: entity_id,
                    created_at: Utc::now(),
                    updated_at: Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            },
            PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Transform".to_string(),
                data: vec![1, 2, 3, 4],
            },
        ];

        let count = flush_to_sqlite(&ops, &mut conn)?;
        assert_eq!(count, 2);

        // Verify entity exists
        let exists: bool = conn.query_row(
            "SELECT COUNT(*) > 0 FROM entities WHERE id = ?1",
            rusqlite::params![entity_id.as_bytes()],
            |row| row.get(0),
        )?;
        assert!(exists);

        Ok(())
    }

    /// Session state is a simple key/value round trip.
    #[test]
    fn test_session_state() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;

        set_session_state(&mut conn, "test_key", "test_value")?;
        let value = get_session_state(&conn, "test_key")?;
        assert_eq!(value, Some("test_value".to_string()));

        Ok(())
    }

    /// Exercises the full crash-detection state machine:
    /// first run -> crash, marked shutdown -> clean, check resets the flag.
    #[test]
    fn test_crash_recovery() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;

        // Simulate first startup - should report as crash (no clean shutdown marker)
        let clean = check_clean_shutdown(&mut conn)?;
        assert!(!clean, "First startup should be detected as crash");

        // Mark clean shutdown
        mark_clean_shutdown(&mut conn)?;

        // Next startup should report clean shutdown
        let clean = check_clean_shutdown(&mut conn)?;
        assert!(clean, "Should detect clean shutdown");

        // After checking clean shutdown, flag should be reset to false
        // So if we check again without marking, it should report as crash
        let value = get_session_state(&conn, "clean_shutdown")?;
        assert_eq!(
            value,
            Some("false".to_string()),
            "Flag should be reset after check"
        );

        Ok(())
    }
}
|
||||
124
crates/libmarathon/src/persistence/error.rs
Normal file
124
crates/libmarathon/src/persistence/error.rs
Normal file
@@ -0,0 +1,124 @@
|
||||
//! Error types for the persistence layer
|
||||
|
||||
use std::fmt;
|
||||
|
||||
/// Result type for persistence operations
|
||||
pub type Result<T> = std::result::Result<T, PersistenceError>;
|
||||
|
||||
/// Errors that can occur in the persistence layer
///
/// Wrapping variants (`Database`, `Serialization`, `Io`) preserve the source
/// error and expose it via `Error::source`; the remaining variants carry
/// descriptive payloads only.
#[derive(Debug)]
pub enum PersistenceError {
    /// Database operation failed
    Database(rusqlite::Error),

    /// Serialization failed
    Serialization(bincode::Error),

    /// Deserialization failed
    Deserialization(String),

    /// Configuration error
    Config(String),

    /// I/O error (file operations, WAL checks, etc.)
    Io(std::io::Error),

    /// Type not found in registry
    TypeNotRegistered(String),

    /// Entity or component not found
    NotFound(String),

    /// Circuit breaker is open, operation blocked
    CircuitBreakerOpen {
        // How many failures in a row tripped the breaker
        consecutive_failures: u32,
        // Suggested back-off before retrying, in seconds
        retry_after_secs: u64,
    },

    /// Component data exceeds maximum size
    ComponentTooLarge {
        // Which component type violated the limit
        component_type: String,
        // Actual serialized size
        size_bytes: usize,
        // Configured maximum
        max_bytes: usize,
    },

    /// Other error
    Other(String),
}
|
||||
|
||||
impl fmt::Display for PersistenceError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
| Self::Database(err) => write!(f, "Database error: {}", err),
|
||||
| Self::Serialization(err) => write!(f, "Serialization error: {}", err),
|
||||
| Self::Deserialization(msg) => write!(f, "Deserialization error: {}", msg),
|
||||
| Self::Config(msg) => write!(f, "Configuration error: {}", msg),
|
||||
| Self::Io(err) => write!(f, "I/O error: {}", err),
|
||||
| Self::TypeNotRegistered(type_name) => {
|
||||
write!(f, "Type not registered in type registry: {}", type_name)
|
||||
},
|
||||
| Self::NotFound(msg) => write!(f, "Not found: {}", msg),
|
||||
| Self::CircuitBreakerOpen {
|
||||
consecutive_failures,
|
||||
retry_after_secs,
|
||||
} => write!(
|
||||
f,
|
||||
"Circuit breaker open after {} consecutive failures, retry after {} seconds",
|
||||
consecutive_failures, retry_after_secs
|
||||
),
|
||||
| Self::ComponentTooLarge {
|
||||
component_type,
|
||||
size_bytes,
|
||||
max_bytes,
|
||||
} => write!(
|
||||
f,
|
||||
"Component '{}' size ({} bytes) exceeds maximum ({} bytes). \
|
||||
This may indicate unbounded data growth or serialization issues.",
|
||||
component_type, size_bytes, max_bytes
|
||||
),
|
||||
| Self::Other(msg) => write!(f, "{}", msg),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for PersistenceError {
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
match self {
|
||||
| Self::Database(err) => Some(err),
|
||||
| Self::Serialization(err) => Some(err),
|
||||
| Self::Io(err) => Some(err),
|
||||
| _ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Conversions from common error types
|
||||
impl From<rusqlite::Error> for PersistenceError {
|
||||
fn from(err: rusqlite::Error) -> Self {
|
||||
Self::Database(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<bincode::Error> for PersistenceError {
|
||||
fn from(err: bincode::Error) -> Self {
|
||||
Self::Serialization(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for PersistenceError {
|
||||
fn from(err: std::io::Error) -> Self {
|
||||
Self::Io(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<toml::de::Error> for PersistenceError {
|
||||
fn from(err: toml::de::Error) -> Self {
|
||||
Self::Config(err.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<toml::ser::Error> for PersistenceError {
|
||||
fn from(err: toml::ser::Error) -> Self {
|
||||
Self::Config(err.to_string())
|
||||
}
|
||||
}
|
||||
218
crates/libmarathon/src/persistence/health.rs
Normal file
218
crates/libmarathon/src/persistence/health.rs
Normal file
@@ -0,0 +1,218 @@
|
||||
//! Health monitoring and error recovery for persistence layer
|
||||
|
||||
use std::time::{
|
||||
Duration,
|
||||
Instant,
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
/// Base delay for exponential backoff in milliseconds.
/// With `MAX_BACKOFF_EXPONENT` = 5 the sequence is 1s, 2s, 4s, 8s, 16s, 32s,
/// then clamped to `MAX_RETRY_DELAY_MS` (see `PersistenceHealth::get_retry_delay`).
const BASE_RETRY_DELAY_MS: u64 = 1000; // 1 second

/// Maximum retry delay in milliseconds (caps exponential backoff)
const MAX_RETRY_DELAY_MS: u64 = 30000; // 30 seconds

/// Maximum exponent for exponential backoff calculation
const MAX_BACKOFF_EXPONENT: u32 = 5;
|
||||
|
||||
/// Resource to track persistence health and failures.
///
/// Consecutive-failure counters reset on the matching success call;
/// `total_failures` only ever grows for the lifetime of the session.
#[derive(Resource, Debug)]
pub struct PersistenceHealth {
    /// Number of consecutive flush failures (reset by `record_flush_success`)
    pub consecutive_flush_failures: u32,

    /// Number of consecutive checkpoint failures (reset by `record_checkpoint_success`)
    pub consecutive_checkpoint_failures: u32,

    /// Time of last successful flush
    pub last_successful_flush: Option<Instant>,

    /// Time of last successful checkpoint
    pub last_successful_checkpoint: Option<Instant>,

    /// Whether the persistence layer is in circuit breaker mode
    pub circuit_breaker_open: bool,

    /// When the circuit breaker was opened (None while closed)
    pub circuit_breaker_opened_at: Option<Instant>,

    /// Total number of failures across the session (never reset)
    pub total_failures: u64,
}
|
||||
|
||||
impl Default for PersistenceHealth {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
consecutive_flush_failures: 0,
|
||||
consecutive_checkpoint_failures: 0,
|
||||
last_successful_flush: None,
|
||||
last_successful_checkpoint: None,
|
||||
circuit_breaker_open: false,
|
||||
circuit_breaker_opened_at: None,
|
||||
total_failures: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PersistenceHealth {
|
||||
/// How long to keep circuit breaker open before attempting recovery
|
||||
pub const CIRCUIT_BREAKER_COOLDOWN: Duration = Duration::from_secs(60);
|
||||
/// Circuit breaker threshold - open after this many consecutive failures
|
||||
pub const CIRCUIT_BREAKER_THRESHOLD: u32 = 5;
|
||||
|
||||
/// Record a successful flush
|
||||
pub fn record_flush_success(&mut self) {
|
||||
self.consecutive_flush_failures = 0;
|
||||
self.last_successful_flush = Some(Instant::now());
|
||||
|
||||
// Close circuit breaker if it was open
|
||||
if self.circuit_breaker_open {
|
||||
info!("Persistence recovered - closing circuit breaker");
|
||||
self.circuit_breaker_open = false;
|
||||
self.circuit_breaker_opened_at = None;
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a flush failure
|
||||
pub fn record_flush_failure(&mut self) {
|
||||
self.consecutive_flush_failures += 1;
|
||||
self.total_failures += 1;
|
||||
|
||||
if self.consecutive_flush_failures >= Self::CIRCUIT_BREAKER_THRESHOLD {
|
||||
if !self.circuit_breaker_open {
|
||||
warn!(
|
||||
"Opening circuit breaker after {} consecutive flush failures",
|
||||
self.consecutive_flush_failures
|
||||
);
|
||||
self.circuit_breaker_open = true;
|
||||
self.circuit_breaker_opened_at = Some(Instant::now());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a successful checkpoint
|
||||
pub fn record_checkpoint_success(&mut self) {
|
||||
self.consecutive_checkpoint_failures = 0;
|
||||
self.last_successful_checkpoint = Some(Instant::now());
|
||||
}
|
||||
|
||||
/// Record a checkpoint failure
|
||||
pub fn record_checkpoint_failure(&mut self) {
|
||||
self.consecutive_checkpoint_failures += 1;
|
||||
self.total_failures += 1;
|
||||
}
|
||||
|
||||
/// Check if we should attempt operations (circuit breaker state)
|
||||
///
|
||||
/// **CRITICAL FIX**: Now takes `&mut self` to properly reset the circuit
|
||||
/// breaker after cooldown expires. This prevents the circuit breaker
|
||||
/// from remaining permanently open after one post-cooldown failure.
|
||||
pub fn should_attempt_operation(&mut self) -> bool {
|
||||
if !self.circuit_breaker_open {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if cooldown period has elapsed
|
||||
if let Some(opened_at) = self.circuit_breaker_opened_at {
|
||||
if opened_at.elapsed() >= Self::CIRCUIT_BREAKER_COOLDOWN {
|
||||
// Transition to half-open state by resetting the breaker
|
||||
info!(
|
||||
"Circuit breaker cooldown elapsed - entering half-open state (testing recovery)"
|
||||
);
|
||||
self.circuit_breaker_open = false;
|
||||
self.circuit_breaker_opened_at = None;
|
||||
// consecutive_flush_failures is kept to track if this probe succeeds
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Get exponential backoff delay based on consecutive failures
|
||||
pub fn get_retry_delay(&self) -> Duration {
|
||||
// Exponential backoff: 1s, 2s, 4s, 8s, 16s, max 30s
|
||||
let delay_ms = BASE_RETRY_DELAY_MS *
|
||||
2u64.pow(self.consecutive_flush_failures.min(MAX_BACKOFF_EXPONENT));
|
||||
Duration::from_millis(delay_ms.min(MAX_RETRY_DELAY_MS))
|
||||
}
|
||||
}
|
||||
|
||||
/// Message emitted when persistence fails
#[derive(Message, Debug, Clone)]
pub struct PersistenceFailureEvent {
    /// Human-readable description of the failure
    pub error: String,
    /// Length of the current failure streak
    pub consecutive_failures: u32,
    /// Whether the failure pushed (or found) the circuit breaker open
    pub circuit_breaker_open: bool,
}

/// Message emitted when persistence recovers from failures
#[derive(Message, Debug, Clone)]
pub struct PersistenceRecoveryEvent {
    /// Failure streak length that was cleared by the recovery
    pub previous_failures: u32,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Breaker opens after CIRCUIT_BREAKER_THRESHOLD consecutive failures
    // and then blocks operations until cooldown or success.
    #[test]
    fn test_circuit_breaker() {
        let mut health = PersistenceHealth::default();

        // Should allow operations initially
        assert!(health.should_attempt_operation());
        assert!(!health.circuit_breaker_open);

        // Record failures
        for _ in 0..PersistenceHealth::CIRCUIT_BREAKER_THRESHOLD {
            health.record_flush_failure();
        }

        // Circuit breaker should now be open
        assert!(health.circuit_breaker_open);
        assert!(!health.should_attempt_operation());

        // Should still block immediately after opening
        assert!(!health.should_attempt_operation());
    }

    // A single successful flush closes an open breaker and clears the streak.
    #[test]
    fn test_recovery() {
        let mut health = PersistenceHealth::default();

        // Trigger circuit breaker
        for _ in 0..PersistenceHealth::CIRCUIT_BREAKER_THRESHOLD {
            health.record_flush_failure();
        }
        assert!(health.circuit_breaker_open);

        // Successful flush should close circuit breaker
        health.record_flush_success();
        assert!(!health.circuit_breaker_open);
        assert_eq!(health.consecutive_flush_failures, 0);
    }

    // Delay doubles per failure from 1s and saturates at the 30s cap.
    #[test]
    fn test_exponential_backoff() {
        let mut health = PersistenceHealth::default();

        // No failures = 1s delay
        assert_eq!(health.get_retry_delay(), Duration::from_secs(1));

        // 1 failure = 2s
        health.record_flush_failure();
        assert_eq!(health.get_retry_delay(), Duration::from_secs(2));

        // 2 failures = 4s
        health.record_flush_failure();
        assert_eq!(health.get_retry_delay(), Duration::from_secs(4));

        // Max out at 30s
        for _ in 0..10 {
            health.record_flush_failure();
        }
        assert_eq!(health.get_retry_delay(), Duration::from_secs(30));
    }
}
|
||||
165
crates/libmarathon/src/persistence/lifecycle.rs
Normal file
165
crates/libmarathon/src/persistence/lifecycle.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
//! iOS lifecycle event handling for persistence
|
||||
//!
|
||||
//! This module provides event types and handlers for iOS application lifecycle
|
||||
//! events that require immediate persistence (e.g., background suspension).
|
||||
//!
|
||||
//! # iOS Integration
|
||||
//!
|
||||
//! To integrate with iOS, wire up these handlers in your app delegate:
|
||||
//!
|
||||
//! ```swift
|
||||
//! // In your iOS app delegate:
|
||||
//! func applicationWillResignActive(_ application: UIApplication) {
|
||||
//! // Send AppLifecycleEvent::WillResignActive to Bevy
|
||||
//! }
|
||||
//!
|
||||
//! func applicationDidEnterBackground(_ application: UIApplication) {
|
||||
//! // Send AppLifecycleEvent::DidEnterBackground to Bevy
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::persistence::*;
|
||||
|
||||
/// Application lifecycle events that require persistence handling
///
/// These events are critical moments where data must be flushed immediately
/// to avoid data loss. They are consumed by `lifecycle_event_system`, which
/// flushes and/or checkpoints depending on the variant.
#[derive(Debug, Clone, Message)]
pub enum AppLifecycleEvent {
    /// Application will resign active (iOS: `applicationWillResignActive`)
    ///
    /// Sent when the app is about to move from active to inactive state.
    /// Example: incoming phone call, user switches to another app
    WillResignActive,

    /// Application did enter background (iOS: `applicationDidEnterBackground`)
    ///
    /// Sent when the app has moved to the background. The app has approximately
    /// 5 seconds to complete critical tasks before suspension.
    DidEnterBackground,

    /// Application will enter foreground (iOS:
    /// `applicationWillEnterForeground`)
    ///
    /// Sent when the app is about to enter the foreground (user returning to
    /// app).
    WillEnterForeground,

    /// Application did become active (iOS: `applicationDidBecomeActive`)
    ///
    /// Sent when the app has become active and is ready to receive user input.
    DidBecomeActive,

    /// Application will terminate (iOS: `applicationWillTerminate`)
    ///
    /// Sent when the app is about to terminate. Similar to shutdown but from
    /// OS. NOTE(review): iOS does not deliver this callback in every
    /// termination path (e.g. suspended apps killed by the OS) — confirm the
    /// background-flush path covers those cases.
    WillTerminate,
}
|
||||
|
||||
/// System to handle iOS lifecycle events and trigger immediate persistence
///
/// This system listens for lifecycle events and performs immediate flushes
/// when the app is backgrounding or terminating. Outcomes are recorded on
/// `PersistenceHealth` so the circuit breaker reflects lifecycle flushes too.
pub fn lifecycle_event_system(
    mut events: MessageReader<AppLifecycleEvent>,
    mut write_buffer: ResMut<WriteBufferResource>,
    db: Res<PersistenceDb>,
    mut metrics: ResMut<PersistenceMetrics>,
    mut health: ResMut<PersistenceHealth>,
    mut pending_tasks: ResMut<PendingFlushTasks>,
) {
    for event in events.read() {
        match event {
            | AppLifecycleEvent::WillResignActive => {
                // App is becoming inactive - perform immediate flush
                info!("App will resign active - performing immediate flush");

                // Flush synchronously; record success/failure for the breaker.
                if let Err(e) = force_flush(&mut write_buffer, &db, &mut metrics) {
                    error!("Failed to flush on resign active: {}", e);
                    health.record_flush_failure();
                } else {
                    health.record_flush_success();
                }
            },

            | AppLifecycleEvent::DidEnterBackground => {
                // App entered background - perform immediate flush and checkpoint
                info!("App entered background - performing immediate flush and checkpoint");

                // Force immediate flush
                if let Err(e) = force_flush(&mut write_buffer, &db, &mut metrics) {
                    error!("Failed to flush on background: {}", e);
                    health.record_flush_failure();
                } else {
                    health.record_flush_success();
                }

                // Also checkpoint the WAL to ensure durability
                // (Passive mode: does not block concurrent readers/writers.)
                let start = std::time::Instant::now();
                match db.lock() {
                    | Ok(mut conn) => match checkpoint_wal(&mut conn, CheckpointMode::Passive) {
                        | Ok(_) => {
                            let duration = start.elapsed();
                            metrics.record_checkpoint(duration);
                            health.record_checkpoint_success();
                            info!("Background checkpoint completed successfully");
                        },
                        | Err(e) => {
                            error!("Failed to checkpoint on background: {}", e);
                            health.record_checkpoint_failure();
                        },
                    },
                    // Lock acquisition failure counts as a checkpoint failure.
                    | Err(e) => {
                        error!("Failed to acquire database lock for checkpoint: {}", e);
                        health.record_checkpoint_failure();
                    },
                }
            },

            | AppLifecycleEvent::WillTerminate => {
                // App will terminate - perform shutdown sequence
                warn!("App will terminate - performing shutdown sequence");

                // Full shutdown path (drains pending tasks via `pending_tasks`).
                if let Err(e) = shutdown_system(
                    &mut write_buffer,
                    &db,
                    &mut metrics,
                    Some(&mut pending_tasks),
                ) {
                    error!("Failed to perform shutdown on terminate: {}", e);
                } else {
                    info!("Clean shutdown completed on terminate");
                }
            },

            | AppLifecycleEvent::WillEnterForeground => {
                // App returning from background - no immediate action needed
                info!("App will enter foreground");
            },

            | AppLifecycleEvent::DidBecomeActive => {
                // App became active - no immediate action needed
                info!("App did become active");
            },
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing a lifecycle event yields the expected variant.
    ///
    /// Uses `matches!` instead of a manual `match` with a catch-all
    /// `panic!` arm — same check, without an unreachable panic path.
    #[test]
    fn test_lifecycle_event_creation() {
        let event = AppLifecycleEvent::WillResignActive;
        assert!(matches!(event, AppLifecycleEvent::WillResignActive));
    }
}
|
||||
211
crates/libmarathon/src/persistence/metrics.rs
Normal file
211
crates/libmarathon/src/persistence/metrics.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
//! Metrics tracking for persistence layer
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
/// Metrics for monitoring persistence performance.
///
/// All counters are cumulative for the session unless reset via `reset()`.
#[derive(Debug, Clone, Default, bevy::prelude::Resource)]
pub struct PersistenceMetrics {
    // Write volume
    pub total_writes: u64,   // individual operations written across all flushes
    pub bytes_written: u64,  // total serialized bytes emitted

    // Timing (averages derived via avg_flush_duration / avg_checkpoint_duration)
    pub flush_count: u64,
    pub total_flush_duration: Duration,
    pub checkpoint_count: u64,
    pub total_checkpoint_duration: Duration,

    // WAL health
    pub wal_size_bytes: u64,      // most recently observed WAL size
    pub max_wal_size_bytes: u64,  // session high-water mark

    // Recovery
    pub crash_recovery_count: u64,
    pub clean_shutdown_count: u64,

    // Buffer stats
    pub max_buffer_size: usize,    // peak write-buffer length observed
    pub total_coalesced_ops: u64,  // operations merged before hitting disk
}
|
||||
|
||||
impl PersistenceMetrics {
|
||||
/// Record a flush operation
|
||||
pub fn record_flush(&mut self, operations: usize, duration: Duration, bytes_written: u64) {
|
||||
self.flush_count += 1;
|
||||
self.total_writes += operations as u64;
|
||||
self.total_flush_duration += duration;
|
||||
self.bytes_written += bytes_written;
|
||||
}
|
||||
|
||||
/// Record a checkpoint operation
|
||||
pub fn record_checkpoint(&mut self, duration: Duration) {
|
||||
self.checkpoint_count += 1;
|
||||
self.total_checkpoint_duration += duration;
|
||||
}
|
||||
|
||||
/// Update WAL size
|
||||
pub fn update_wal_size(&mut self, size: u64) {
|
||||
self.wal_size_bytes = size;
|
||||
if size > self.max_wal_size_bytes {
|
||||
self.max_wal_size_bytes = size;
|
||||
}
|
||||
}
|
||||
|
||||
/// Record a crash recovery
|
||||
pub fn record_crash_recovery(&mut self) {
|
||||
self.crash_recovery_count += 1;
|
||||
}
|
||||
|
||||
/// Record a clean shutdown
|
||||
pub fn record_clean_shutdown(&mut self) {
|
||||
self.clean_shutdown_count += 1;
|
||||
}
|
||||
|
||||
/// Record buffer stats
|
||||
pub fn record_buffer_stats(&mut self, buffer_size: usize, coalesced: u64) {
|
||||
if buffer_size > self.max_buffer_size {
|
||||
self.max_buffer_size = buffer_size;
|
||||
}
|
||||
self.total_coalesced_ops += coalesced;
|
||||
}
|
||||
|
||||
/// Get average flush duration
|
||||
pub fn avg_flush_duration(&self) -> Duration {
|
||||
if self.flush_count == 0 {
|
||||
Duration::from_secs(0)
|
||||
} else {
|
||||
self.total_flush_duration / self.flush_count as u32
|
||||
}
|
||||
}
|
||||
|
||||
/// Get average checkpoint duration
|
||||
pub fn avg_checkpoint_duration(&self) -> Duration {
|
||||
if self.checkpoint_count == 0 {
|
||||
Duration::from_secs(0)
|
||||
} else {
|
||||
self.total_checkpoint_duration / self.checkpoint_count as u32
|
||||
}
|
||||
}
|
||||
|
||||
/// Get crash recovery rate
|
||||
pub fn crash_recovery_rate(&self) -> f64 {
|
||||
let total = self.crash_recovery_count + self.clean_shutdown_count;
|
||||
if total == 0 {
|
||||
0.0
|
||||
} else {
|
||||
self.crash_recovery_count as f64 / total as f64
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if metrics indicate performance issues
|
||||
pub fn check_health(&self) -> Vec<HealthWarning> {
|
||||
let mut warnings = Vec::new();
|
||||
|
||||
// Check flush duration
|
||||
if self.avg_flush_duration() > Duration::from_millis(50) {
|
||||
warnings.push(HealthWarning::SlowFlush(self.avg_flush_duration()));
|
||||
}
|
||||
|
||||
// Check WAL size
|
||||
if self.wal_size_bytes > 5 * 1024 * 1024 {
|
||||
// 5MB
|
||||
warnings.push(HealthWarning::LargeWal(self.wal_size_bytes));
|
||||
}
|
||||
|
||||
// Check crash rate
|
||||
if self.crash_recovery_rate() > 0.1 {
|
||||
warnings.push(HealthWarning::HighCrashRate(self.crash_recovery_rate()));
|
||||
}
|
||||
|
||||
warnings
|
||||
}
|
||||
|
||||
/// Reset all metrics
|
||||
pub fn reset(&mut self) {
|
||||
*self = Self::default();
|
||||
}
|
||||
}
|
||||
|
||||
/// Health warnings for persistence metrics, produced by
/// `PersistenceMetrics::check_health`.
#[derive(Debug, Clone)]
pub enum HealthWarning {
    /// Average flush duration is above the 50ms threshold
    SlowFlush(Duration),

    /// WAL file has grown past the 5MB threshold
    LargeWal(u64),

    /// Crash recovery rate is above the 10% threshold
    HighCrashRate(f64),
}

impl std::fmt::Display for HealthWarning {
    /// Render the warning with its measured value and the threshold it broke.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::SlowFlush(d) => {
                write!(f, "Flush duration ({:?}) exceeds 50ms threshold", d)
            },
            Self::LargeWal(bytes) => {
                write!(f, "WAL size ({} bytes) exceeds 5MB threshold", bytes)
            },
            Self::HighCrashRate(rate) => write!(
                f,
                "Crash recovery rate ({:.1}%) exceeds 10% threshold",
                rate * 100.0
            ),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Flush and checkpoint recorders bump the matching counters.
    #[test]
    fn test_metrics_recording() {
        let mut metrics = PersistenceMetrics::default();

        metrics.record_flush(10, Duration::from_millis(5), 1024);
        assert_eq!(metrics.flush_count, 1);
        assert_eq!(metrics.total_writes, 10);
        assert_eq!(metrics.bytes_written, 1024);

        metrics.record_checkpoint(Duration::from_millis(10));
        assert_eq!(metrics.checkpoint_count, 1);
    }

    // Average is total duration over flush count: (10ms + 20ms) / 2 = 15ms.
    #[test]
    fn test_average_calculations() {
        let mut metrics = PersistenceMetrics::default();

        metrics.record_flush(10, Duration::from_millis(10), 1024);
        metrics.record_flush(20, Duration::from_millis(20), 2048);

        assert_eq!(metrics.avg_flush_duration(), Duration::from_millis(15));
    }

    // A 100ms average flush trips the 50ms SlowFlush threshold (and only that).
    #[test]
    fn test_health_warnings() {
        let mut metrics = PersistenceMetrics::default();

        // Add slow flush
        metrics.record_flush(10, Duration::from_millis(100), 1024);

        let warnings = metrics.check_health();
        assert_eq!(warnings.len(), 1);
        assert!(matches!(warnings[0], HealthWarning::SlowFlush(_)));
    }

    // 1 crash out of 3 total startups = 1/3.
    #[test]
    fn test_crash_recovery_rate() {
        let mut metrics = PersistenceMetrics::default();

        metrics.record_crash_recovery();
        metrics.record_clean_shutdown();
        metrics.record_clean_shutdown();

        assert_eq!(metrics.crash_recovery_rate(), 1.0 / 3.0);
    }
}
|
||||
189
crates/libmarathon/src/persistence/migrations.rs
Normal file
189
crates/libmarathon/src/persistence/migrations.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
//! Database migration system
|
||||
//!
|
||||
//! Provides versioned schema migrations for SQLite database evolution.
|
||||
|
||||
use rusqlite::Connection;
|
||||
|
||||
use crate::persistence::error::Result;
|
||||
|
||||
/// Migration metadata: one versioned schema change, applied at most once
/// (tracking lives in the `schema_migrations` table).
#[derive(Debug, Clone)]
pub struct Migration {
    /// Migration version number (must be unique; applied in MIGRATIONS order)
    pub version: i64,
    /// Migration name/description (recorded alongside the version)
    pub name: &'static str,
    /// SQL statements to apply (executed as a batch inside one transaction)
    pub up: &'static str,
}
|
||||
|
||||
/// All available migrations in order.
///
/// The SQL bodies are embedded at compile time via `include_str!`.
/// NOTE(review): versions jump from 1 to 4 — presumably 002/003 were
/// removed or consolidated; confirm no deployed database has them recorded.
pub const MIGRATIONS: &[Migration] = &[
    Migration {
        version: 1,
        name: "initial_schema",
        up: include_str!("migrations/001_initial_schema.sql"),
    },
    Migration {
        version: 4,
        name: "sessions",
        up: include_str!("migrations/004_sessions.sql"),
    },
];
|
||||
|
||||
/// Initialize the migrations table
|
||||
fn create_migrations_table(conn: &Connection) -> Result<()> {
|
||||
conn.execute(
|
||||
"CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
applied_at INTEGER NOT NULL
|
||||
)",
|
||||
[],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the current schema version
|
||||
pub fn get_current_version(conn: &Connection) -> Result<i64> {
|
||||
create_migrations_table(conn)?;
|
||||
|
||||
let version = conn
|
||||
.query_row(
|
||||
"SELECT COALESCE(MAX(version), 0) FROM schema_migrations",
|
||||
[],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.unwrap_or(0);
|
||||
|
||||
Ok(version)
|
||||
}
|
||||
|
||||
/// Check if a migration has been applied
|
||||
fn is_migration_applied(conn: &Connection, version: i64) -> Result<bool> {
|
||||
let count: i64 = conn.query_row(
|
||||
"SELECT COUNT(*) FROM schema_migrations WHERE version = ?1",
|
||||
[version],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
Ok(count > 0)
|
||||
}
|
||||
|
||||
/// Apply a single migration
|
||||
fn apply_migration(conn: &mut Connection, migration: &Migration) -> Result<()> {
|
||||
tracing::info!(
|
||||
"Applying migration {} ({})",
|
||||
migration.version,
|
||||
migration.name
|
||||
);
|
||||
|
||||
let tx = conn.transaction()?;
|
||||
|
||||
// Execute the migration SQL
|
||||
tx.execute_batch(migration.up)?;
|
||||
|
||||
// Record that we applied this migration
|
||||
tx.execute(
|
||||
"INSERT INTO schema_migrations (version, name, applied_at)
|
||||
VALUES (?1, ?2, ?3)",
|
||||
rusqlite::params![
|
||||
migration.version,
|
||||
migration.name,
|
||||
chrono::Utc::now().timestamp(),
|
||||
],
|
||||
)?;
|
||||
|
||||
tx.commit()?;
|
||||
|
||||
tracing::info!(
|
||||
"Migration {} ({}) applied successfully",
|
||||
migration.version,
|
||||
migration.name
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run all pending migrations
|
||||
pub fn run_migrations(conn: &mut Connection) -> Result<()> {
|
||||
create_migrations_table(conn)?;
|
||||
|
||||
let current_version = get_current_version(conn)?;
|
||||
tracing::info!("Current schema version: {}", current_version);
|
||||
|
||||
let mut applied_count = 0;
|
||||
|
||||
for migration in MIGRATIONS {
|
||||
if !is_migration_applied(conn, migration.version)? {
|
||||
apply_migration(conn, migration)?;
|
||||
applied_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if applied_count > 0 {
|
||||
tracing::info!("Applied {} migration(s)", applied_count);
|
||||
} else {
|
||||
tracing::debug!("No pending migrations");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use rusqlite::Connection;

    use super::*;

    // End-to-end: migrations bring a fresh DB to the latest version and
    // re-running them is a no-op.
    #[test]
    fn test_migration_system() {
        let mut conn = Connection::open_in_memory().unwrap();

        // Initially at version 0
        assert_eq!(get_current_version(&conn).unwrap(), 0);

        // Run migrations
        run_migrations(&mut conn).unwrap();

        // Should be at latest version
        let latest_version = MIGRATIONS.last().unwrap().version;
        assert_eq!(get_current_version(&conn).unwrap(), latest_version);

        // Running again should be a no-op
        run_migrations(&mut conn).unwrap();
        assert_eq!(get_current_version(&conn).unwrap(), latest_version);
    }

    // The bookkeeping table exists and starts empty.
    #[test]
    fn test_migrations_table_created() {
        let conn = Connection::open_in_memory().unwrap();
        create_migrations_table(&conn).unwrap();

        // Should be able to query the table
        let count: i64 = conn
            .query_row("SELECT COUNT(*) FROM schema_migrations", [], |row| {
                row.get(0)
            })
            .unwrap();
        assert_eq!(count, 0);
    }

    // is_migration_applied flips once a row for that version is inserted.
    #[test]
    fn test_is_migration_applied() {
        let conn = Connection::open_in_memory().unwrap();
        create_migrations_table(&conn).unwrap();

        // Migration 1 should not be applied yet
        assert!(!is_migration_applied(&conn, 1).unwrap());

        // Apply migration 1
        conn.execute(
            "INSERT INTO schema_migrations (version, name, applied_at) VALUES (1, 'test', 0)",
            [],
        )
        .unwrap();

        // Now it should be applied
        assert!(is_migration_applied(&conn, 1).unwrap());
    }
}
|
||||
@@ -0,0 +1,62 @@
|
||||
-- Migration 001: Initial schema
-- Creates the base tables for entity persistence and CRDT sync.
--
-- NOTE(review): the ON DELETE CASCADE foreign key below only fires when the
-- connection runs with PRAGMA foreign_keys = ON (off by default in SQLite);
-- confirm the connection setup enables it.

-- Entities table - stores entity metadata
CREATE TABLE IF NOT EXISTS entities (
    id BLOB PRIMARY KEY,
    entity_type TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- Components table - stores serialized component data,
-- one row per (entity, component type) pair.
CREATE TABLE IF NOT EXISTS components (
    entity_id BLOB NOT NULL,
    component_type TEXT NOT NULL,
    data BLOB NOT NULL,
    updated_at INTEGER NOT NULL,
    PRIMARY KEY (entity_id, component_type),
    FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
);

-- Index for querying components by entity
CREATE INDEX IF NOT EXISTS idx_components_entity
    ON components(entity_id);

-- Operation log - for CRDT sync protocol.
-- (node_id, sequence_number) is unique so replayed operations are rejected.
CREATE TABLE IF NOT EXISTS operation_log (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    node_id TEXT NOT NULL,
    sequence_number INTEGER NOT NULL,
    operation BLOB NOT NULL,
    timestamp INTEGER NOT NULL,
    UNIQUE(node_id, sequence_number)
);

-- Index for efficient operation log queries
CREATE INDEX IF NOT EXISTS idx_oplog_node_seq
    ON operation_log(node_id, sequence_number);

-- Vector clock table - for causality tracking (one counter per node)
CREATE TABLE IF NOT EXISTS vector_clock (
    node_id TEXT PRIMARY KEY,
    counter INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- Session state table - for crash detection (key/value flags such as
-- the clean_shutdown marker)
CREATE TABLE IF NOT EXISTS session_state (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at INTEGER NOT NULL
);

-- WAL checkpoint tracking (single-row table, see insert below)
CREATE TABLE IF NOT EXISTS checkpoint_state (
    last_checkpoint INTEGER NOT NULL,
    wal_size_bytes INTEGER NOT NULL
);

-- Initialize checkpoint state if not exists (pinned to rowid 1 so the
-- table holds exactly one row)
INSERT OR IGNORE INTO checkpoint_state (rowid, last_checkpoint, wal_size_bytes)
VALUES (1, strftime('%s', 'now'), 0);
||||
@@ -0,0 +1,51 @@
|
||||
-- Migration 004: Add session support
-- Adds session tables and session-scopes existing tables.
--
-- NOTE(review): the ALTER TABLE ... ADD COLUMN statements below are NOT
-- idempotent (SQLite has no ADD COLUMN IF NOT EXISTS); this migration relies
-- on the schema_migrations tracking to run exactly once.

-- Sessions table
CREATE TABLE IF NOT EXISTS sessions (
    id BLOB PRIMARY KEY,
    code TEXT NOT NULL,
    name TEXT,
    created_at INTEGER NOT NULL,
    last_active INTEGER NOT NULL,
    entity_count INTEGER NOT NULL DEFAULT 0,
    state TEXT NOT NULL,
    secret BLOB,
    UNIQUE(id),
    UNIQUE(code)
);

-- Index for finding recent sessions
CREATE INDEX IF NOT EXISTS idx_sessions_last_active
    ON sessions(last_active DESC);

-- Session membership (which node was in which session);
-- left_at stays NULL while the node is still a member.
CREATE TABLE IF NOT EXISTS session_membership (
    session_id BLOB NOT NULL,
    node_id TEXT NOT NULL,
    joined_at INTEGER NOT NULL,
    left_at INTEGER,
    PRIMARY KEY (session_id, node_id),
    FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE
);

-- Add session_id to entities table (nullable: pre-existing rows keep NULL)
ALTER TABLE entities ADD COLUMN session_id BLOB;

-- Index for session-scoped entity queries
CREATE INDEX IF NOT EXISTS idx_entities_session
    ON entities(session_id);

-- Add session_id to vector_clock
ALTER TABLE vector_clock ADD COLUMN session_id BLOB;

-- Composite index for session + node lookups
CREATE INDEX IF NOT EXISTS idx_vector_clock_session_node
    ON vector_clock(session_id, node_id);

-- Add session_id to operation_log
ALTER TABLE operation_log ADD COLUMN session_id BLOB;

-- Index for session-scoped operation queries
CREATE INDEX IF NOT EXISTS idx_operation_log_session
    ON operation_log(session_id, node_id, sequence_number);
||||
55
crates/libmarathon/src/persistence/mod.rs
Normal file
55
crates/libmarathon/src/persistence/mod.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
//! Persistence layer for battery-efficient state management
//!
//! This module implements the persistence strategy defined in RFC 0002.
//! It provides a three-tier system to minimize disk I/O while maintaining data
//! durability:
//!
//! 1. **In-Memory Dirty Tracking** - Track changes without writing immediately
//! 2. **Write Buffer** - Batch and coalesce operations before writing
//! 3. **SQLite with WAL Mode** - Controlled checkpoints to minimize fsync()
//!    calls
//!
//! # Example
//!
//! ```no_run
//! use bevy::prelude::*;
//! use libmarathon::persistence::*;
//!
//! fn setup(mut commands: Commands) {
//!     // Spawn an entity with the Persisted marker
//!     commands.spawn(Persisted::new());
//! }
//!
//! // The persistence plugin automatically tracks changes to Persisted components
//! fn main() {
//!     App::new()
//!         .add_plugins(DefaultPlugins)
//!         .add_plugins(PersistencePlugin::new("app.db"))
//!         .add_systems(Startup, setup)
//!         .run();
//! }
//! ```

// Private submodules; their public items are re-exported below so callers
// use the flat `libmarathon::persistence::*` namespace.
mod config;
mod database;
mod error;
mod health;
mod lifecycle;
mod metrics;
mod migrations;
mod plugin;
// `reflection` is additionally reachable as a named module
// (`persistence::reflection`), unlike the private submodules above.
pub mod reflection;
mod systems;
mod types;

// Flat re-exports: everything public lives directly under
// `crate::persistence`.
pub use config::*;
pub use database::*;
pub use error::*;
pub use health::*;
pub use lifecycle::*;
pub use metrics::*;
pub use migrations::*;
pub use plugin::*;
pub use reflection::*;
pub use systems::*;
pub use types::*;
|
||||
313
crates/libmarathon/src/persistence/plugin.rs
Normal file
313
crates/libmarathon/src/persistence/plugin.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
//! Bevy plugin for the persistence layer
|
||||
//!
|
||||
//! This module provides a Bevy plugin that sets up all the necessary resources
|
||||
//! and systems for the persistence layer.
|
||||
|
||||
use std::{
|
||||
ops::{
|
||||
Deref,
|
||||
DerefMut,
|
||||
},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
|
||||
use crate::persistence::*;
|
||||
|
||||
/// Bevy plugin for persistence
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```no_run
|
||||
/// use bevy::prelude::*;
|
||||
/// use libmarathon::persistence::PersistencePlugin;
|
||||
///
|
||||
/// App::new()
|
||||
/// .add_plugins(PersistencePlugin::new("app.db"))
|
||||
/// .run();
|
||||
/// ```
|
||||
pub struct PersistencePlugin {
|
||||
/// Path to the SQLite database file
|
||||
pub db_path: PathBuf,
|
||||
|
||||
/// Persistence configuration
|
||||
pub config: PersistenceConfig,
|
||||
}
|
||||
|
||||
impl PersistencePlugin {
|
||||
/// Create a new persistence plugin with default configuration
|
||||
pub fn new(db_path: impl Into<PathBuf>) -> Self {
|
||||
Self {
|
||||
db_path: db_path.into(),
|
||||
config: PersistenceConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new persistence plugin with custom configuration
|
||||
pub fn with_config(db_path: impl Into<PathBuf>, config: PersistenceConfig) -> Self {
|
||||
Self {
|
||||
db_path: db_path.into(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
/// Load configuration from a TOML file
|
||||
pub fn with_config_file(
|
||||
db_path: impl Into<PathBuf>,
|
||||
config_path: impl AsRef<std::path::Path>,
|
||||
) -> crate::persistence::error::Result<Self> {
|
||||
let config = load_config_from_file(config_path)?;
|
||||
Ok(Self {
|
||||
db_path: db_path.into(),
|
||||
config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Plugin for PersistencePlugin {
    /// Wire the persistence layer into the app: open the database, register
    /// reflected types, insert all resources, and schedule the systems.
    fn build(&self, app: &mut App) {
        // Initialize database.
        // NOTE(review): `expect` here means a missing/corrupt DB file aborts
        // app startup — deliberate fail-fast, but worth confirming for mobile
        // targets where the path may not yet exist.
        let db = PersistenceDb::from_path(&self.db_path)
            .expect("Failed to initialize persistence database");

        // Register types for reflection so `Persisted` round-trips through
        // the reflection-based serializers.
        app.register_type::<Persisted>();

        // Add messages/events emitted by the flush/lifecycle systems.
        app.add_message::<PersistenceFailureEvent>()
            .add_message::<PersistenceRecoveryEvent>()
            .add_message::<AppLifecycleEvent>();

        // Insert resources. The write buffer is capped by the configured
        // max operation count; the config itself is cloned in as a resource.
        app.insert_resource(db)
            .insert_resource(DirtyEntitiesResource::default())
            .insert_resource(WriteBufferResource::new(self.config.max_buffer_operations))
            .insert_resource(self.config.clone())
            .insert_resource(BatteryStatus::default())
            .insert_resource(PersistenceMetrics::default())
            .insert_resource(CheckpointTimer::default())
            .insert_resource(PersistenceHealth::default())
            .insert_resource(PendingFlushTasks::default());

        // Add startup system (sanity checks / initial metrics).
        app.add_systems(Startup, persistence_startup_system);

        // Add systems in the appropriate schedule. `.chain()` enforces the
        // order lifecycle -> collect dirty -> flush -> checkpoint each frame,
        // so a frame's changes are buffered before the flush decision runs.
        app.add_systems(
            Update,
            (
                lifecycle_event_system,
                collect_dirty_entities_bevy_system,
                flush_system,
                checkpoint_bevy_system,
            )
                .chain(),
        );
    }
}
|
||||
|
||||
/// Resource wrapper for DirtyEntities
|
||||
#[derive(Resource, Default)]
|
||||
pub struct DirtyEntitiesResource(pub DirtyEntities);
|
||||
|
||||
impl std::ops::Deref for DirtyEntitiesResource {
|
||||
type Target = DirtyEntities;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for DirtyEntitiesResource {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource wrapper for WriteBuffer
|
||||
#[derive(Resource)]
|
||||
pub struct WriteBufferResource(pub WriteBuffer);
|
||||
|
||||
impl WriteBufferResource {
|
||||
pub fn new(max_operations: usize) -> Self {
|
||||
Self(WriteBuffer::new(max_operations))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::Deref for WriteBufferResource {
|
||||
type Target = WriteBuffer;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for WriteBufferResource {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Startup system to initialize persistence
|
||||
fn persistence_startup_system(db: Res<PersistenceDb>, mut metrics: ResMut<PersistenceMetrics>) {
|
||||
if let Err(e) = startup_system(db.deref(), metrics.deref_mut()) {
|
||||
error!("Failed to initialize persistence: {}", e);
|
||||
} else {
|
||||
info!("Persistence system initialized");
|
||||
}
|
||||
}
|
||||
|
||||
/// System to collect dirty entities using Bevy's change detection
|
||||
///
|
||||
/// This system tracks changes to the `Persisted` component. When `Persisted` is
|
||||
/// marked as changed (via `mark_dirty()` or direct mutation), ALL components on
|
||||
/// that entity are serialized and added to the write buffer.
|
||||
///
|
||||
/// For automatic tracking without manual `mark_dirty()` calls, use the
|
||||
/// `auto_track_component_changes_system` which automatically detects changes
|
||||
/// to common components like Transform, GlobalTransform, etc.
|
||||
fn collect_dirty_entities_bevy_system(world: &mut World) {
|
||||
// Collect changed entities first
|
||||
let changed_entities: Vec<(Entity, uuid::Uuid)> = {
|
||||
let mut query = world.query_filtered::<(Entity, &Persisted), Changed<Persisted>>();
|
||||
query
|
||||
.iter(world)
|
||||
.map(|(entity, persisted)| (entity, persisted.network_id))
|
||||
.collect()
|
||||
};
|
||||
|
||||
if changed_entities.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Serialize components for each entity
|
||||
for (entity, network_id) in changed_entities {
|
||||
// First, ensure the entity exists in the database
|
||||
{
|
||||
let now = chrono::Utc::now();
|
||||
let mut write_buffer = world.resource_mut::<WriteBufferResource>();
|
||||
if let Err(e) = write_buffer.add(PersistenceOp::UpsertEntity {
|
||||
id: network_id,
|
||||
data: EntityData {
|
||||
id: network_id,
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
entity_type: "NetworkedEntity".to_string(),
|
||||
},
|
||||
}) {
|
||||
error!(
|
||||
"Failed to add UpsertEntity operation for {}: {}",
|
||||
network_id, e
|
||||
);
|
||||
return; // Skip this entity if we can't even add the entity op
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize all components on this entity (generic tracking)
|
||||
let components = {
|
||||
let type_registry = world.resource::<AppTypeRegistry>().read();
|
||||
let comps = serialize_all_components_from_entity(entity, world, &type_registry);
|
||||
drop(type_registry);
|
||||
comps
|
||||
};
|
||||
|
||||
// Add operations for each component
|
||||
for (component_type, data) in components {
|
||||
// Get mutable access to dirty and mark it
|
||||
{
|
||||
let mut dirty = world.resource_mut::<DirtyEntitiesResource>();
|
||||
dirty.mark_dirty(network_id, &component_type);
|
||||
}
|
||||
|
||||
// Get mutable access to write_buffer and add the operation
|
||||
{
|
||||
let mut write_buffer = world.resource_mut::<WriteBufferResource>();
|
||||
if let Err(e) = write_buffer.add(PersistenceOp::UpsertComponent {
|
||||
entity_id: network_id,
|
||||
component_type: component_type.clone(),
|
||||
data,
|
||||
}) {
|
||||
error!(
|
||||
"Failed to add UpsertComponent operation for entity {} component {}: {}",
|
||||
network_id, component_type, e
|
||||
);
|
||||
// Continue with other components even if one fails
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// System to automatically track changes to common Bevy components
|
||||
///
|
||||
/// This system detects changes to Transform, automatically triggering
|
||||
/// persistence by accessing `Persisted` mutably (which marks it as changed via
|
||||
/// Bevy's change detection).
|
||||
///
|
||||
/// Add this system to your app if you want automatic persistence of Transform
|
||||
/// changes:
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use bevy::prelude::*;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// App::new()
|
||||
/// .add_plugins(PersistencePlugin::new("app.db"))
|
||||
/// .add_systems(Update, auto_track_transform_changes_system)
|
||||
/// .run();
|
||||
/// ```
|
||||
pub fn auto_track_transform_changes_system(
|
||||
mut query: Query<&mut Persisted, (With<Transform>, Changed<Transform>)>,
|
||||
) {
|
||||
// Simply accessing &mut Persisted triggers Bevy's change detection
|
||||
for _persisted in query.iter_mut() {
|
||||
// No-op - the mutable access itself marks Persisted as changed
|
||||
}
|
||||
}
|
||||
|
||||
/// System to checkpoint the WAL
|
||||
fn checkpoint_bevy_system(
|
||||
db: Res<PersistenceDb>,
|
||||
config: Res<PersistenceConfig>,
|
||||
mut timer: ResMut<CheckpointTimer>,
|
||||
mut metrics: ResMut<PersistenceMetrics>,
|
||||
mut health: ResMut<PersistenceHealth>,
|
||||
) {
|
||||
match checkpoint_system(
|
||||
db.deref(),
|
||||
config.deref(),
|
||||
timer.deref_mut(),
|
||||
metrics.deref_mut(),
|
||||
) {
|
||||
| Ok(_) => {
|
||||
health.record_checkpoint_success();
|
||||
},
|
||||
| Err(e) => {
|
||||
health.record_checkpoint_failure();
|
||||
error!(
|
||||
"Failed to checkpoint WAL (attempt {}): {}",
|
||||
health.consecutive_checkpoint_failures, e
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// `new` stores the path and picks up the default config.
    #[test]
    fn test_plugin_creation() {
        let plugin = PersistencePlugin::new("test.db");
        assert_eq!(plugin.db_path, PathBuf::from("test.db"));
    }

    /// `with_config` keeps the caller-supplied configuration.
    #[test]
    fn test_plugin_with_config() {
        // Struct-update syntax instead of mutate-after-default
        // (fixes clippy::field_reassign_with_default).
        let config = PersistenceConfig {
            flush_interval_secs: 5,
            ..Default::default()
        };

        let plugin = PersistencePlugin::with_config("test.db", config);
        assert_eq!(plugin.config.flush_interval_secs, 5);
    }
}
|
||||
313
crates/libmarathon/src/persistence/reflection.rs
Normal file
313
crates/libmarathon/src/persistence/reflection.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
//! Reflection-based component serialization for persistence
|
||||
//!
|
||||
//! This module provides utilities to serialize and deserialize Bevy components
|
||||
//! using reflection, allowing the persistence layer to work with any component
|
||||
//! that implements Reflect.
|
||||
|
||||
use bevy::{
|
||||
prelude::*,
|
||||
reflect::{
|
||||
TypeRegistry,
|
||||
serde::{
|
||||
ReflectSerializer,
|
||||
TypedReflectDeserializer,
|
||||
TypedReflectSerializer,
|
||||
},
|
||||
},
|
||||
};
|
||||
use bincode::Options as _;
|
||||
use serde::de::DeserializeSeed;
|
||||
|
||||
use crate::persistence::error::{
|
||||
PersistenceError,
|
||||
Result,
|
||||
};
|
||||
|
||||
/// Marker component to indicate that an entity should be persisted
|
||||
///
|
||||
/// Add this component to any entity that should have its state persisted to
|
||||
/// disk. The persistence system will automatically serialize all components on
|
||||
/// entities with this marker when they change.
|
||||
///
|
||||
/// # Triggering Persistence
|
||||
///
|
||||
/// To trigger persistence after modifying components on an entity, access
|
||||
/// `Persisted` mutably through a query. Bevy's change detection will
|
||||
/// automatically mark it as changed:
|
||||
///
|
||||
/// ```no_run
|
||||
/// # use bevy::prelude::*;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// fn update_position(mut query: Query<(&mut Transform, &mut Persisted)>) {
|
||||
/// for (mut transform, mut persisted) in query.iter_mut() {
|
||||
/// transform.translation.x += 1.0;
|
||||
/// // Accessing &mut Persisted triggers change detection automatically
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Alternatively, use `auto_track_transform_changes_system` for automatic
|
||||
/// persistence of Transform changes without manual queries.
|
||||
#[derive(Component, Reflect, Default)]
|
||||
#[reflect(Component)]
|
||||
pub struct Persisted {
|
||||
/// Unique network ID for this entity
|
||||
pub network_id: uuid::Uuid,
|
||||
}
|
||||
|
||||
impl Persisted {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
network_id: uuid::Uuid::new_v4(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_id(network_id: uuid::Uuid) -> Self {
|
||||
Self { network_id }
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for components that can be persisted
///
/// Provides a single default method; implementors normally override nothing.
pub trait Persistable: Component + Reflect {
    /// Get the type name for this component (used as key in database)
    ///
    /// NOTE(review): `std::any::type_name` output is not guaranteed stable
    /// across compiler versions or crate layouts — confirm database keys
    /// tolerate a changed string, or prefer the registry's `type_path()`.
    fn type_name() -> &'static str {
        std::any::type_name::<Self>()
    }
}
|
||||
|
||||
/// Serialize a component using Bevy's reflection system
|
||||
///
|
||||
/// This converts any component implementing `Reflect` into bytes for storage.
|
||||
/// Uses bincode for efficient binary serialization with type information from
|
||||
/// the registry to handle polymorphic types correctly.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `component`: Component to serialize (must implement `Reflect`)
|
||||
/// - `type_registry`: Bevy's type registry for reflection metadata
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(Vec<u8>)`: Serialized component data
|
||||
/// - `Err`: If serialization fails (e.g., type not properly registered)
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// # use bevy::prelude::*;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// # fn example(component: &Transform, registry: &AppTypeRegistry) -> anyhow::Result<()> {
|
||||
/// let registry = registry.read();
|
||||
/// let bytes = serialize_component(component.as_reflect(), ®istry)?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn serialize_component(
|
||||
component: &dyn Reflect,
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Result<Vec<u8>> {
|
||||
let serializer = ReflectSerializer::new(component, type_registry);
|
||||
bincode::options()
|
||||
.serialize(&serializer)
|
||||
.map_err(PersistenceError::from)
|
||||
}
|
||||
|
||||
/// Serialize a component when the type is known (more efficient for bincode)
|
||||
///
|
||||
/// This uses `TypedReflectSerializer` which doesn't include type path
|
||||
/// information, making it compatible with `TypedReflectDeserializer` for binary
|
||||
/// formats.
|
||||
pub fn serialize_component_typed(
|
||||
component: &dyn Reflect,
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Result<Vec<u8>> {
|
||||
let serializer = TypedReflectSerializer::new(component, type_registry);
|
||||
bincode::options()
|
||||
.serialize(&serializer)
|
||||
.map_err(PersistenceError::from)
|
||||
}
|
||||
|
||||
/// Deserialize a component using Bevy's reflection system
|
||||
///
|
||||
/// Converts serialized bytes back into a reflected component. The returned
|
||||
/// component is boxed and must be downcast to the concrete type for use.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `bytes`: Serialized component data from [`serialize_component`]
|
||||
/// - `type_registry`: Bevy's type registry for reflection metadata
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(Box<dyn PartialReflect>)`: Deserialized component (needs downcasting)
|
||||
/// - `Err`: If deserialization fails (e.g., type not registered, data
|
||||
/// corruption)
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// # use bevy::prelude::*;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// # fn example(bytes: &[u8], registry: &AppTypeRegistry) -> anyhow::Result<()> {
|
||||
/// let registry = registry.read();
|
||||
/// let reflected = deserialize_component(bytes, ®istry)?;
|
||||
/// // Downcast to concrete type as needed
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn deserialize_component(
|
||||
bytes: &[u8],
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Result<Box<dyn PartialReflect>> {
|
||||
let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
|
||||
let reflect_deserializer = bevy::reflect::serde::ReflectDeserializer::new(type_registry);
|
||||
|
||||
reflect_deserializer
|
||||
.deserialize(&mut deserializer)
|
||||
.map_err(|e| PersistenceError::Deserialization(e.to_string()))
|
||||
}
|
||||
|
||||
/// Deserialize a component when the type is known
|
||||
///
|
||||
/// Uses `TypedReflectDeserializer` which is more efficient for binary formats
|
||||
/// like bincode when the component type is known at deserialization time.
|
||||
pub fn deserialize_component_typed(
|
||||
bytes: &[u8],
|
||||
component_type: &str,
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Result<Box<dyn PartialReflect>> {
|
||||
let registration = type_registry
|
||||
.get_with_type_path(component_type)
|
||||
.ok_or_else(|| {
|
||||
PersistenceError::Deserialization(format!("Type {} not registered", component_type))
|
||||
})?;
|
||||
|
||||
let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
|
||||
let reflect_deserializer = TypedReflectDeserializer::new(registration, type_registry);
|
||||
|
||||
reflect_deserializer
|
||||
.deserialize(&mut deserializer)
|
||||
.map_err(|e| PersistenceError::Deserialization(e.to_string()))
|
||||
}
|
||||
|
||||
/// Serialize a component directly from an entity using its type path
|
||||
///
|
||||
/// This is a convenience function that combines type lookup, reflection, and
|
||||
/// serialization. It's the primary method used by the persistence system to
|
||||
/// save component state without knowing the concrete type at compile time.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `entity`: Bevy entity to read the component from
|
||||
/// - `component_type`: Type path string (e.g.,
|
||||
/// "bevy_transform::components::Transform")
|
||||
/// - `world`: Bevy world containing the entity
|
||||
/// - `type_registry`: Bevy's type registry for reflection metadata
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Some(Vec<u8>)`: Serialized component data
|
||||
/// - `None`: If entity doesn't have the component or type isn't registered
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// # use bevy::prelude::*;
|
||||
/// # use libmarathon::persistence::*;
|
||||
/// # fn example(entity: Entity, world: &World, registry: &AppTypeRegistry) -> Option<()> {
|
||||
/// let registry = registry.read();
|
||||
/// let bytes = serialize_component_from_entity(
|
||||
/// entity,
|
||||
/// "bevy_transform::components::Transform",
|
||||
/// world,
|
||||
/// ®istry,
|
||||
/// )?;
|
||||
/// # Some(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn serialize_component_from_entity(
|
||||
entity: Entity,
|
||||
component_type: &str,
|
||||
world: &World,
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Option<Vec<u8>> {
|
||||
// Get the type registration
|
||||
let registration = type_registry.get_with_type_path(component_type)?;
|
||||
|
||||
// Get the ReflectComponent data
|
||||
let reflect_component = registration.data::<ReflectComponent>()?;
|
||||
|
||||
// Reflect the component from the entity
|
||||
let reflected = reflect_component.reflect(world.entity(entity))?;
|
||||
|
||||
// Serialize it directly
|
||||
serialize_component(reflected, type_registry).ok()
|
||||
}
|
||||
|
||||
/// Serialize all components from an entity that have reflection data
|
||||
///
|
||||
/// This iterates over all components on an entity and serializes those that:
|
||||
/// - Are registered in the type registry
|
||||
/// - Have `ReflectComponent` data (meaning they support reflection)
|
||||
/// - Are not the `Persisted` marker component (to avoid redundant storage)
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `entity`: Bevy entity to serialize components from
|
||||
/// - `world`: Bevy world containing the entity
|
||||
/// - `type_registry`: Bevy's type registry for reflection metadata
|
||||
///
|
||||
/// # Returns
|
||||
/// Vector of tuples containing (component_type_path, serialized_data) for each
|
||||
/// component
|
||||
pub fn serialize_all_components_from_entity(
|
||||
entity: Entity,
|
||||
world: &World,
|
||||
type_registry: &TypeRegistry,
|
||||
) -> Vec<(String, Vec<u8>)> {
|
||||
let mut components = Vec::new();
|
||||
|
||||
// Get the entity reference
|
||||
let entity_ref = world.entity(entity);
|
||||
|
||||
// Iterate over all type registrations
|
||||
for registration in type_registry.iter() {
|
||||
// Skip if no ReflectComponent data (not a component)
|
||||
let Some(reflect_component) = registration.data::<ReflectComponent>() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Get the type path for this component
|
||||
let type_path = registration.type_info().type_path();
|
||||
|
||||
// Skip the Persisted marker component itself (we don't need to persist it)
|
||||
if type_path.ends_with("::Persisted") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Try to reflect this component from the entity
|
||||
if let Some(reflected) = reflect_component.reflect(entity_ref) {
|
||||
// Serialize the component using typed serialization for consistency
|
||||
// This matches the format expected by deserialize_component_typed
|
||||
if let Ok(data) = serialize_component_typed(reflected, type_registry) {
|
||||
components.push((type_path.to_string(), data));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
components
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal reflectable component for round-trip checks.
    #[derive(Component, Reflect, Default)]
    #[reflect(Component)]
    struct TestComponent {
        value: i32,
    }

    /// Serializing a registered component yields a non-empty byte buffer.
    #[test]
    fn test_component_serialization() -> Result<()> {
        let mut registry = TypeRegistry::default();
        registry.register::<TestComponent>();

        let subject = TestComponent { value: 42 };
        let bytes = serialize_component(&subject, &registry)?;
        assert_ne!(bytes.len(), 0);

        Ok(())
    }
}
|
||||
495
crates/libmarathon/src/persistence/systems.rs
Normal file
495
crates/libmarathon/src/persistence/systems.rs
Normal file
@@ -0,0 +1,495 @@
|
||||
//! Bevy systems for the persistence layer
|
||||
//!
|
||||
//! This module provides systems that integrate the persistence layer with
|
||||
//! Bevy's ECS. These systems handle dirty tracking, write buffering, and
|
||||
//! flushing to SQLite.
|
||||
|
||||
use std::{
|
||||
sync::{
|
||||
Arc,
|
||||
Mutex,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use bevy::{
|
||||
prelude::*,
|
||||
tasks::{
|
||||
IoTaskPool,
|
||||
Task,
|
||||
},
|
||||
};
|
||||
use futures_lite::future;
|
||||
use rusqlite::Connection;
|
||||
|
||||
use crate::persistence::{
|
||||
error::Result,
|
||||
*,
|
||||
};
|
||||
|
||||
/// Resource wrapping the SQLite connection
|
||||
#[derive(Clone, bevy::prelude::Resource)]
|
||||
pub struct PersistenceDb {
|
||||
pub conn: Arc<Mutex<Connection>>,
|
||||
}
|
||||
|
||||
impl PersistenceDb {
|
||||
pub fn new(conn: Connection) -> Self {
|
||||
Self {
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_path(path: impl AsRef<std::path::Path>) -> Result<Self> {
|
||||
let conn = initialize_persistence_db(path)?;
|
||||
Ok(Self::new(conn))
|
||||
}
|
||||
|
||||
pub fn in_memory() -> Result<Self> {
|
||||
let conn = Connection::open_in_memory()?;
|
||||
configure_sqlite_for_persistence(&conn)?;
|
||||
create_persistence_schema(&conn)?;
|
||||
Ok(Self::new(conn))
|
||||
}
|
||||
|
||||
/// Acquire the database connection with proper error handling
|
||||
///
|
||||
/// Handles mutex poisoning gracefully by converting to PersistenceError.
|
||||
/// If a thread panics while holding the mutex, subsequent lock attempts
|
||||
/// will fail with a poisoned error, which this method converts to a
|
||||
/// recoverable error instead of panicking.
|
||||
///
|
||||
/// # Returns
|
||||
/// - `Ok(MutexGuard<Connection>)`: Locked connection ready for use
|
||||
/// - `Err(PersistenceError)`: If mutex is poisoned
|
||||
pub fn lock(&self) -> Result<std::sync::MutexGuard<'_, Connection>> {
|
||||
self.conn.lock().map_err(|e| {
|
||||
PersistenceError::Other(format!("Database connection mutex poisoned: {}", e))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource for tracking when the last checkpoint occurred
///
/// `checkpoint_system` compares `last_checkpoint.elapsed()` against the
/// configured interval to decide whether a WAL checkpoint is due.
#[derive(Debug, bevy::prelude::Resource)]
pub struct CheckpointTimer {
    // Monotonic timestamp of the most recent checkpoint (or app start).
    pub last_checkpoint: Instant,
}

impl Default for CheckpointTimer {
    /// Starts the timer "now" so the first checkpoint waits a full interval.
    fn default() -> Self {
        Self {
            last_checkpoint: Instant::now(),
        }
    }
}

/// Resource for tracking pending async flush tasks
///
/// `flush_system` pushes newly spawned flush tasks here and polls them on
/// subsequent frames, removing each one when it completes.
#[derive(Default, bevy::prelude::Resource)]
pub struct PendingFlushTasks {
    // In-flight flushes; each task resolves to the flush outcome.
    pub tasks: Vec<Task<Result<FlushResult>>>,
}

/// Result of an async flush operation
///
/// Carries what the metrics layer needs to record a completed flush.
#[derive(Debug, Clone)]
pub struct FlushResult {
    // Number of persistence operations written in this flush.
    pub operations_count: usize,
    // Wall-clock time the SQLite write took.
    pub duration: std::time::Duration,
    // Payload bytes written (as computed by calculate_bytes_written).
    pub bytes_written: u64,
}
|
||||
|
||||
/// Helper function to calculate total bytes written from operations
|
||||
fn calculate_bytes_written(ops: &[PersistenceOp]) -> u64 {
|
||||
ops.iter()
|
||||
.map(|op| match op {
|
||||
| PersistenceOp::UpsertComponent { data, .. } => data.len() as u64,
|
||||
| PersistenceOp::LogOperation { operation, .. } => operation.len() as u64,
|
||||
| _ => 0,
|
||||
})
|
||||
.sum()
|
||||
}
|
||||
|
||||
/// Helper function to perform a flush with metrics tracking (synchronous)
|
||||
///
|
||||
/// Used for critical operations like shutdown where we need to block
|
||||
fn perform_flush_sync(
|
||||
ops: &[PersistenceOp],
|
||||
db: &PersistenceDb,
|
||||
metrics: &mut PersistenceMetrics,
|
||||
) -> Result<()> {
|
||||
if ops.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let start = Instant::now();
|
||||
let count = {
|
||||
let mut conn = db.lock()?;
|
||||
flush_to_sqlite(ops, &mut conn)?
|
||||
};
|
||||
let duration = start.elapsed();
|
||||
|
||||
let bytes_written = calculate_bytes_written(ops);
|
||||
metrics.record_flush(count, duration, bytes_written);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to perform a flush asynchronously (for normal operations)
|
||||
///
|
||||
/// This runs the blocking SQLite operations on a thread pool via
|
||||
/// blocking::unblock to avoid blocking the async runtime. This works with both
|
||||
/// Bevy's async-executor and tokio runtimes, making it compatible with the
|
||||
/// current Bevy integration and the future dedicated iOS async runtime.
|
||||
async fn perform_flush_async(ops: Vec<PersistenceOp>, db: PersistenceDb) -> Result<FlushResult> {
|
||||
if ops.is_empty() {
|
||||
return Ok(FlushResult {
|
||||
operations_count: 0,
|
||||
duration: std::time::Duration::ZERO,
|
||||
bytes_written: 0,
|
||||
});
|
||||
}
|
||||
|
||||
let bytes_written = calculate_bytes_written(&ops);
|
||||
|
||||
// Use blocking::unblock which works with any async runtime (async-executor,
|
||||
// tokio, etc.) This spawns the blocking operation on a dedicated thread
|
||||
// pool
|
||||
let result = blocking::unblock(move || {
|
||||
let start = Instant::now();
|
||||
|
||||
let count = {
|
||||
let mut conn = db.lock()?;
|
||||
flush_to_sqlite(&ops, &mut conn)?
|
||||
};
|
||||
|
||||
let duration = start.elapsed();
|
||||
|
||||
Ok::<_, crate::persistence::PersistenceError>((count, duration))
|
||||
})
|
||||
.await?;
|
||||
|
||||
let (count, duration) = result;
|
||||
|
||||
Ok(FlushResult {
|
||||
operations_count: count,
|
||||
duration,
|
||||
bytes_written,
|
||||
})
|
||||
}
|
||||
|
||||
/// System to flush the write buffer to SQLite asynchronously
|
||||
///
|
||||
/// This system runs on a schedule based on the configuration and battery
|
||||
/// status. It spawns async tasks to avoid blocking the main thread and handles
|
||||
/// errors gracefully.
|
||||
///
|
||||
/// The system also polls pending flush tasks and updates metrics when they
|
||||
/// complete.
|
||||
pub fn flush_system(
|
||||
mut write_buffer: ResMut<WriteBufferResource>,
|
||||
db: Res<PersistenceDb>,
|
||||
config: Res<PersistenceConfig>,
|
||||
battery: Res<BatteryStatus>,
|
||||
mut metrics: ResMut<PersistenceMetrics>,
|
||||
mut pending_tasks: ResMut<PendingFlushTasks>,
|
||||
mut health: ResMut<PersistenceHealth>,
|
||||
mut failure_events: MessageWriter<PersistenceFailureEvent>,
|
||||
mut recovery_events: MessageWriter<PersistenceRecoveryEvent>,
|
||||
) {
|
||||
// First, poll and handle completed async flush tasks
|
||||
pending_tasks.tasks.retain_mut(|task| {
|
||||
if let Some(result) = future::block_on(future::poll_once(task)) {
|
||||
match result {
|
||||
| Ok(flush_result) => {
|
||||
let previous_failures = health.consecutive_flush_failures;
|
||||
health.record_flush_success();
|
||||
|
||||
// Update metrics
|
||||
metrics.record_flush(
|
||||
flush_result.operations_count,
|
||||
flush_result.duration,
|
||||
flush_result.bytes_written,
|
||||
);
|
||||
|
||||
// Emit recovery event if we recovered from failures
|
||||
if previous_failures > 0 {
|
||||
recovery_events.write(PersistenceRecoveryEvent { previous_failures });
|
||||
}
|
||||
},
|
||||
| Err(e) => {
|
||||
health.record_flush_failure();
|
||||
|
||||
let error_msg = format!("{}", e);
|
||||
error!(
|
||||
"Async flush failed (attempt {}/{}): {}",
|
||||
health.consecutive_flush_failures,
|
||||
PersistenceHealth::CIRCUIT_BREAKER_THRESHOLD,
|
||||
error_msg
|
||||
);
|
||||
|
||||
// Emit failure event
|
||||
failure_events.write(PersistenceFailureEvent {
|
||||
error: error_msg,
|
||||
consecutive_failures: health.consecutive_flush_failures,
|
||||
circuit_breaker_open: health.circuit_breaker_open,
|
||||
});
|
||||
},
|
||||
}
|
||||
false // Remove completed task
|
||||
} else {
|
||||
true // Keep pending task
|
||||
}
|
||||
});
|
||||
|
||||
// Check circuit breaker before spawning new flush
|
||||
if !health.should_attempt_operation() {
|
||||
return;
|
||||
}
|
||||
|
||||
let flush_interval = config.get_flush_interval(battery.level, battery.is_charging);
|
||||
|
||||
// Check if we should flush
|
||||
if !write_buffer.should_flush(flush_interval) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Take operations from buffer
|
||||
let ops = write_buffer.take_operations();
|
||||
if ops.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Spawn async flush task on I/O thread pool
|
||||
let task_pool = IoTaskPool::get();
|
||||
let db_clone = db.clone();
|
||||
|
||||
let task = task_pool.spawn(async move { perform_flush_async(ops, db_clone.clone()).await });
|
||||
|
||||
pending_tasks.tasks.push(task);
|
||||
|
||||
// Update last flush time
|
||||
write_buffer.last_flush = Instant::now();
|
||||
}
|
||||
|
||||
/// System to checkpoint the WAL file
///
/// This runs less frequently than flush_system to merge the WAL into the main
/// database.
///
/// A checkpoint is performed when either:
/// - the configured checkpoint interval has elapsed, or
/// - the WAL file has grown past `config.max_wal_size_bytes` (checked only
///   when the interval has *not* yet elapsed — see body).
///
/// # Errors
/// Propagates lock-acquisition and SQLite errors from `db.lock()`,
/// `get_wal_size`, and `checkpoint_wal`.
pub fn checkpoint_system(
    db: &PersistenceDb,
    config: &PersistenceConfig,
    timer: &mut CheckpointTimer,
    metrics: &mut PersistenceMetrics,
) -> Result<()> {
    let checkpoint_interval = config.get_checkpoint_interval();

    // Check if it's time to checkpoint
    if timer.last_checkpoint.elapsed() < checkpoint_interval {
        // Also check WAL size
        // Connection lock is scoped tightly so it is released before the
        // checkpoint below re-acquires it.
        let wal_size = {
            let conn = db.lock()?;
            get_wal_size(&conn)?
        };

        metrics.update_wal_size(wal_size as u64);

        // Force checkpoint if WAL is too large
        // (i.e. skip the checkpoint entirely while the WAL is still small)
        if wal_size < config.max_wal_size_bytes as i64 {
            return Ok(());
        }
    }

    // Perform checkpoint
    // NOTE(review): Passive mode — presumably maps to SQLite's PASSIVE
    // checkpoint, which does not block concurrent readers/writers; confirm
    // against `checkpoint_wal`'s implementation.
    let start = Instant::now();
    let info = {
        let mut conn = db.lock()?;
        checkpoint_wal(&mut conn, CheckpointMode::Passive)?
    };
    let duration = start.elapsed();

    // Update metrics
    metrics.record_checkpoint(duration);
    timer.last_checkpoint = Instant::now();

    // Log if checkpoint was busy
    if info.busy {
        tracing::warn!("WAL checkpoint was busy - some pages may not have been checkpointed");
    }

    Ok(())
}
|
||||
|
||||
/// System to handle application shutdown
///
/// This ensures a final flush and checkpoint before the application exits.
/// Uses synchronous flush to ensure all data is written before exit.
///
/// **CRITICAL**: Waits for all pending async flush tasks to complete before
/// proceeding with shutdown. This prevents data loss from in-flight operations.
///
/// Order of operations: drain pending async tasks → synchronous flush of any
/// remaining buffered operations → WAL checkpoint (Truncate) → mark the
/// session as cleanly shut down.
///
/// # Errors
/// Propagates errors from the final synchronous flush, the WAL checkpoint,
/// and `mark_clean_shutdown`. Failures of *pending async* tasks are logged
/// and swallowed so shutdown can proceed.
pub fn shutdown_system(
    write_buffer: &mut WriteBuffer,
    db: &PersistenceDb,
    metrics: &mut PersistenceMetrics,
    pending_tasks: Option<&mut PendingFlushTasks>,
) -> Result<()> {
    // CRITICAL: Wait for all pending async flushes to complete
    // This prevents data loss from in-flight operations
    if let Some(pending) = pending_tasks {
        info!(
            "Waiting for {} pending flush tasks to complete before shutdown",
            pending.tasks.len()
        );

        for task in pending.tasks.drain(..) {
            // Block on each pending task to ensure completion
            match future::block_on(task) {
                | Ok(flush_result) => {
                    // Update metrics for completed flush
                    metrics.record_flush(
                        flush_result.operations_count,
                        flush_result.duration,
                        flush_result.bytes_written,
                    );
                    debug!(
                        "Pending flush completed: {} operations",
                        flush_result.operations_count
                    );
                },
                | Err(e) => {
                    error!("Pending flush failed during shutdown: {}", e);
                    // Continue with shutdown even if a task failed
                },
            }
        }

        info!("All pending flush tasks completed");
    }

    // Force flush any remaining operations (synchronous for shutdown)
    let ops = write_buffer.take_operations();
    perform_flush_sync(&ops, db, metrics)?;

    // Checkpoint the WAL
    // NOTE(review): Truncate mode — presumably SQLite's TRUNCATE checkpoint,
    // which resets the WAL file to zero length after merging; confirm in
    // `checkpoint_wal`.
    let start = Instant::now();
    {
        let mut conn = db.lock()?;
        checkpoint_wal(&mut conn, CheckpointMode::Truncate)?;

        // Mark clean shutdown
        // Must happen after the flush/checkpoint above so the flag truthfully
        // reflects that all data reached disk.
        mark_clean_shutdown(&mut conn)?;
    }
    let duration = start.elapsed();
    metrics.record_checkpoint(duration);
    metrics.record_clean_shutdown();

    Ok(())
}
|
||||
|
||||
/// System to initialize persistence on startup
|
||||
///
|
||||
/// This checks for crash recovery and sets up the session.
|
||||
pub fn startup_system(db: &PersistenceDb, metrics: &mut PersistenceMetrics) -> Result<()> {
|
||||
let mut conn = db.lock()?;
|
||||
|
||||
// Check if previous session shut down cleanly
|
||||
let clean_shutdown = check_clean_shutdown(&mut conn)?;
|
||||
|
||||
if !clean_shutdown {
|
||||
tracing::warn!("Previous session did not shut down cleanly - crash detected");
|
||||
metrics.record_crash_recovery();
|
||||
|
||||
// Perform any necessary recovery operations here
|
||||
// For now, SQLite's WAL mode handles recovery automatically
|
||||
} else {
|
||||
tracing::info!("Previous session shut down cleanly");
|
||||
}
|
||||
|
||||
// Set up new session
|
||||
let session = SessionState::new();
|
||||
set_session_state(&mut conn, "session_id", &session.session_id)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to force an immediate flush (for critical operations)
|
||||
///
|
||||
/// Uses synchronous flush to ensure data is written immediately.
|
||||
/// Suitable for critical operations like iOS background events.
|
||||
pub fn force_flush(
|
||||
write_buffer: &mut WriteBuffer,
|
||||
db: &PersistenceDb,
|
||||
metrics: &mut PersistenceMetrics,
|
||||
) -> Result<()> {
|
||||
let ops = write_buffer.take_operations();
|
||||
perform_flush_sync(&ops, db, metrics)?;
|
||||
write_buffer.last_flush = Instant::now();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// An in-memory database accepts a single entity upsert end-to-end.
    #[test]
    fn test_persistence_db_in_memory() -> Result<()> {
        let db = PersistenceDb::in_memory()?;

        let id = uuid::Uuid::new_v4();
        let batch = vec![PersistenceOp::UpsertEntity {
            id,
            data: EntityData {
                id,
                created_at: chrono::Utc::now(),
                updated_at: chrono::Utc::now(),
                entity_type: "TestEntity".to_string(),
            },
        }];

        let mut conn = db.lock()?;
        flush_to_sqlite(&batch, &mut conn)?;

        Ok(())
    }

    /// Buffered entity + component operations flush synchronously, and the
    /// metrics/buffer state afterwards reflect exactly one flush.
    #[test]
    fn test_flush_system() -> Result<()> {
        let db = PersistenceDb::in_memory()?;
        let mut buffer = WriteBuffer::new(1000);
        let mut metrics = PersistenceMetrics::default();

        let id = uuid::Uuid::new_v4();

        // Entity first, then a component attached to it.
        buffer
            .add(PersistenceOp::UpsertEntity {
                id,
                data: EntityData {
                    id,
                    created_at: chrono::Utc::now(),
                    updated_at: chrono::Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            })
            .unwrap();

        buffer
            .add(PersistenceOp::UpsertComponent {
                entity_id: id,
                component_type: "Transform".to_string(),
                data: vec![1, 2, 3],
            })
            .unwrap();

        // Drain the buffer and exercise the synchronous flush path.
        let batch = buffer.take_operations();
        perform_flush_sync(&batch, &db, &mut metrics)?;

        assert_eq!(metrics.flush_count, 1);
        assert_eq!(buffer.len(), 0);

        Ok(())
    }
}
|
||||
886
crates/libmarathon/src/persistence/types.rs
Normal file
886
crates/libmarathon/src/persistence/types.rs
Normal file
@@ -0,0 +1,886 @@
|
||||
//! Core types for the persistence layer
|
||||
|
||||
use std::{
|
||||
collections::{
|
||||
HashMap,
|
||||
HashSet,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use bevy::prelude::Resource;
|
||||
use chrono::{
|
||||
DateTime,
|
||||
Utc,
|
||||
};
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
/// Maximum size for a single component in bytes (10MB)
/// Components larger than this may indicate serialization issues or unbounded
/// data growth
/// Enforced by `WriteBuffer::add_with_priority`, which rejects oversized
/// payloads with `PersistenceError::ComponentTooLarge`.
const MAX_COMPONENT_SIZE_BYTES: usize = 10 * 1024 * 1024;

/// Critical flush deadline in milliseconds (1 second for tier-1 operations)
/// Compared against `WriteBuffer::first_critical_time` in `should_flush`.
const CRITICAL_FLUSH_DEADLINE_MS: u64 = 1000;

/// Unique identifier for entities that can be synced across nodes
pub type EntityId = uuid::Uuid;

/// Node identifier for CRDT operations
/// (same underlying UUID type as `EntityId`; kept as a distinct alias for
/// readability — the compiler will not prevent mixing them up)
pub type NodeId = uuid::Uuid;
|
||||
|
||||
/// Priority level for persistence operations
///
/// Determines how quickly an operation should be flushed to disk:
/// - **Normal**: Regular batched flushing (5-60s intervals based on battery)
/// - **Critical**: Flush within 1 second (tier-1 operations like user actions,
///   CRDT ops)
/// - **Immediate**: Flush immediately (shutdown, background suspension)
///
/// NOTE: the derived `PartialOrd`/`Ord` use variant declaration order, so
/// `Normal < Critical < Immediate`. Do not reorder the variants — buffer
/// escalation logic compares priorities with `>` / `>=`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum FlushPriority {
    /// Normal priority - regular batched flushing
    Normal,
    /// Critical priority - flush within 1 second
    Critical,
    /// Immediate priority - flush right now
    Immediate,
}
|
||||
|
||||
/// Resource to track entities with uncommitted changes
///
/// Populated via `mark_dirty` and wiped wholesale by `clear` once the changes
/// have been handed to the write buffer. All three collections are kept in
/// lockstep and keyed by the same entity ids.
#[derive(Debug, Default)]
pub struct DirtyEntities {
    /// Set of entity IDs with changes not yet in write buffer
    pub entities: HashSet<EntityId>,

    /// Map of entity ID to set of dirty component type names
    pub components: HashMap<EntityId, HashSet<String>>,

    /// Track when each entity was last modified (for prioritization)
    pub last_modified: HashMap<EntityId, Instant>,
}
|
||||
|
||||
impl DirtyEntities {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Mark an entity's component as dirty
|
||||
pub fn mark_dirty(&mut self, entity_id: EntityId, component_type: impl Into<String>) {
|
||||
self.entities.insert(entity_id);
|
||||
self.components
|
||||
.entry(entity_id)
|
||||
.or_default()
|
||||
.insert(component_type.into());
|
||||
self.last_modified.insert(entity_id, Instant::now());
|
||||
}
|
||||
|
||||
/// Clear all dirty tracking (called after flush to write buffer)
|
||||
pub fn clear(&mut self) {
|
||||
self.entities.clear();
|
||||
self.components.clear();
|
||||
self.last_modified.clear();
|
||||
}
|
||||
|
||||
/// Check if an entity is dirty
|
||||
pub fn is_dirty(&self, entity_id: &EntityId) -> bool {
|
||||
self.entities.contains(entity_id)
|
||||
}
|
||||
|
||||
/// Get the number of dirty entities
|
||||
pub fn count(&self) -> usize {
|
||||
self.entities.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Operations that can be persisted to the database
///
/// Payload fields typed as `Vec<u8>` are opaque serialized bytes; the
/// serialization format is defined by the callers that produce them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PersistenceOp {
    /// Insert or update an entity's existence
    UpsertEntity { id: EntityId, data: EntityData },

    /// Insert or update a component on an entity
    /// `data` must not exceed 10MB (enforced when added to the write buffer).
    UpsertComponent {
        entity_id: EntityId,
        component_type: String,
        data: Vec<u8>,
    },

    /// Log an operation for CRDT sync
    /// `sequence` is the per-node operation counter; `operation` is the
    /// serialized CRDT payload (same 10MB size limit as components).
    LogOperation {
        node_id: NodeId,
        sequence: u64,
        operation: Vec<u8>,
    },

    /// Update vector clock for causality tracking
    UpdateVectorClock { node_id: NodeId, counter: u64 },

    /// Delete an entity
    DeleteEntity { id: EntityId },

    /// Delete a component from an entity
    DeleteComponent {
        entity_id: EntityId,
        component_type: String,
    },
}
|
||||
|
||||
impl PersistenceOp {
|
||||
/// Get the default priority for this operation type
|
||||
///
|
||||
/// CRDT operations (LogOperation, UpdateVectorClock) are critical tier-1
|
||||
/// operations that should be flushed within 1 second to maintain
|
||||
/// causality across nodes. Other operations use normal priority by
|
||||
/// default.
|
||||
pub fn default_priority(&self) -> FlushPriority {
|
||||
match self {
|
||||
// CRDT operations are tier-1 (critical)
|
||||
| PersistenceOp::LogOperation { .. } | PersistenceOp::UpdateVectorClock { .. } => {
|
||||
FlushPriority::Critical
|
||||
},
|
||||
// All other operations are normal priority by default
|
||||
| _ => FlushPriority::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Metadata about an entity
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityData {
    /// Stable cross-node identifier for the entity.
    pub id: EntityId,
    /// When the entity was first created (UTC).
    pub created_at: DateTime<Utc>,
    /// When the entity was last modified (UTC).
    pub updated_at: DateTime<Utc>,
    /// Free-form type tag (e.g. "TestEntity"); semantics defined by callers.
    pub entity_type: String,
}
|
||||
|
||||
/// Write buffer for batching persistence operations
///
/// Upsert operations are coalesced: only the latest state per entity and per
/// (entity, component) pair is kept, via the two index maps below. Flush
/// urgency is tracked separately through `highest_priority` and
/// `first_critical_time`.
#[derive(Debug)]
pub struct WriteBuffer {
    /// Pending operations not yet committed to SQLite
    pub pending_operations: Vec<PersistenceOp>,

    /// Index mapping (entity_id, component_type) to position in
    /// `pending_operations`. Enables O(1) deduplication for UpsertComponent
    /// operations.
    component_index: std::collections::HashMap<(EntityId, String), usize>,

    /// Index mapping entity_id to position in pending_operations
    /// Enables O(1) deduplication for UpsertEntity operations
    entity_index: std::collections::HashMap<EntityId, usize>,

    /// When the buffer was last flushed
    pub last_flush: Instant,

    /// Maximum number of operations before forcing a flush
    pub max_operations: usize,

    /// Highest priority operation currently in the buffer
    pub highest_priority: FlushPriority,

    /// When the first critical operation was added (for deadline tracking)
    pub first_critical_time: Option<Instant>,
}
|
||||
|
||||
impl WriteBuffer {
|
||||
pub fn new(max_operations: usize) -> Self {
|
||||
Self {
|
||||
pending_operations: Vec::new(),
|
||||
component_index: std::collections::HashMap::new(),
|
||||
entity_index: std::collections::HashMap::new(),
|
||||
last_flush: Instant::now(),
|
||||
max_operations,
|
||||
highest_priority: FlushPriority::Normal,
|
||||
first_critical_time: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add an operation to the write buffer with normal priority
|
||||
///
|
||||
/// This is a convenience method that calls `add_with_priority` with
|
||||
/// `FlushPriority::Normal`.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
|
||||
/// MAX_COMPONENT_SIZE_BYTES (10MB)
|
||||
pub fn add(&mut self, op: PersistenceOp) -> Result<(), crate::persistence::PersistenceError> {
|
||||
self.add_with_priority(op, FlushPriority::Normal)
|
||||
}
|
||||
|
||||
/// Add an operation using its default priority
|
||||
///
|
||||
/// Uses `PersistenceOp::default_priority()` to determine priority
|
||||
/// automatically. CRDT operations will be added as Critical, others as
|
||||
/// Normal.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
|
||||
/// MAX_COMPONENT_SIZE_BYTES (10MB)
|
||||
pub fn add_with_default_priority(
|
||||
&mut self,
|
||||
op: PersistenceOp,
|
||||
) -> Result<(), crate::persistence::PersistenceError> {
|
||||
let priority = op.default_priority();
|
||||
self.add_with_priority(op, priority)
|
||||
}
|
||||
|
||||
/// Add an operation to the write buffer with the specified priority
|
||||
///
|
||||
/// If an operation for the same entity+component already exists,
|
||||
/// it will be replaced (keeping only the latest state). The priority
|
||||
/// is tracked separately to determine flush urgency.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
|
||||
/// MAX_COMPONENT_SIZE_BYTES (10MB)
|
||||
pub fn add_with_priority(
|
||||
&mut self,
|
||||
op: PersistenceOp,
|
||||
priority: FlushPriority,
|
||||
) -> Result<(), crate::persistence::PersistenceError> {
|
||||
// Validate component size to prevent unbounded memory growth
|
||||
match &op {
|
||||
| PersistenceOp::UpsertComponent {
|
||||
data,
|
||||
component_type,
|
||||
..
|
||||
} => {
|
||||
if data.len() > MAX_COMPONENT_SIZE_BYTES {
|
||||
return Err(crate::persistence::PersistenceError::ComponentTooLarge {
|
||||
component_type: component_type.clone(),
|
||||
size_bytes: data.len(),
|
||||
max_bytes: MAX_COMPONENT_SIZE_BYTES,
|
||||
});
|
||||
}
|
||||
},
|
||||
| PersistenceOp::LogOperation { operation, .. } => {
|
||||
if operation.len() > MAX_COMPONENT_SIZE_BYTES {
|
||||
return Err(crate::persistence::PersistenceError::ComponentTooLarge {
|
||||
component_type: "Operation".to_string(),
|
||||
size_bytes: operation.len(),
|
||||
max_bytes: MAX_COMPONENT_SIZE_BYTES,
|
||||
});
|
||||
}
|
||||
},
|
||||
| _ => {},
|
||||
}
|
||||
|
||||
match &op {
|
||||
| PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type,
|
||||
..
|
||||
} => {
|
||||
// O(1) lookup: check if we already have this component
|
||||
let key = (*entity_id, component_type.clone());
|
||||
if let Some(&old_pos) = self.component_index.get(&key) {
|
||||
// Replace existing operation in-place
|
||||
self.pending_operations[old_pos] = op;
|
||||
return Ok(());
|
||||
}
|
||||
// New operation: add to index
|
||||
let new_pos = self.pending_operations.len();
|
||||
self.component_index.insert(key, new_pos);
|
||||
},
|
||||
| PersistenceOp::UpsertEntity { id, .. } => {
|
||||
// O(1) lookup: check if we already have this entity
|
||||
if let Some(&old_pos) = self.entity_index.get(id) {
|
||||
// Replace existing operation in-place
|
||||
self.pending_operations[old_pos] = op;
|
||||
return Ok(());
|
||||
}
|
||||
// New operation: add to index
|
||||
let new_pos = self.pending_operations.len();
|
||||
self.entity_index.insert(*id, new_pos);
|
||||
},
|
||||
| _ => {
|
||||
// Other operations don't need coalescing
|
||||
},
|
||||
}
|
||||
|
||||
// Track priority for flush urgency
|
||||
if priority > self.highest_priority {
|
||||
self.highest_priority = priority;
|
||||
}
|
||||
|
||||
// Track when first critical operation was added (for deadline enforcement)
|
||||
if priority >= FlushPriority::Critical && self.first_critical_time.is_none() {
|
||||
self.first_critical_time = Some(Instant::now());
|
||||
}
|
||||
|
||||
self.pending_operations.push(op);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Take all pending operations and return them for flushing
|
||||
///
|
||||
/// This resets the priority tracking state and clears the deduplication
|
||||
/// indices.
|
||||
pub fn take_operations(&mut self) -> Vec<PersistenceOp> {
|
||||
// Reset priority tracking when operations are taken
|
||||
self.highest_priority = FlushPriority::Normal;
|
||||
self.first_critical_time = None;
|
||||
|
||||
// Clear deduplication indices
|
||||
self.component_index.clear();
|
||||
self.entity_index.clear();
|
||||
|
||||
std::mem::take(&mut self.pending_operations)
|
||||
}
|
||||
|
||||
/// Check if buffer should be flushed
|
||||
///
|
||||
/// Returns true if any of these conditions are met:
|
||||
/// - Buffer is at capacity (max_operations reached)
|
||||
/// - Regular flush interval has elapsed (for normal priority)
|
||||
/// - Critical operation deadline exceeded (1 second for critical ops)
|
||||
/// - Immediate priority operation exists
|
||||
pub fn should_flush(&self, flush_interval: std::time::Duration) -> bool {
|
||||
// Immediate priority always flushes
|
||||
if self.highest_priority == FlushPriority::Immediate {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Critical priority flushes after 1 second deadline
|
||||
if self.highest_priority == FlushPriority::Critical {
|
||||
if let Some(critical_time) = self.first_critical_time {
|
||||
if critical_time.elapsed().as_millis() >= CRITICAL_FLUSH_DEADLINE_MS as u128 {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Normal flushing conditions
|
||||
self.pending_operations.len() >= self.max_operations ||
|
||||
self.last_flush.elapsed() >= flush_interval
|
||||
}
|
||||
|
||||
/// Get the number of pending operations
|
||||
pub fn len(&self) -> usize {
|
||||
self.pending_operations.len()
|
||||
}
|
||||
|
||||
/// Check if the buffer is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.pending_operations.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Battery status for adaptive flushing
///
/// Fed into `PersistenceConfig::get_flush_interval` so flush cadence can
/// back off when the device is low on power.
#[derive(Debug, Clone, Copy, Resource)]
pub struct BatteryStatus {
    /// Battery level from 0.0 to 1.0
    /// (clamped into this range by `update_from_ios`)
    pub level: f32,

    /// Whether the device is currently charging
    pub is_charging: bool,

    /// Whether low power mode is enabled (iOS)
    pub is_low_power_mode: bool,
}
|
||||
|
||||
impl Default for BatteryStatus {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
level: 1.0,
|
||||
is_charging: false,
|
||||
is_low_power_mode: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BatteryStatus {
|
||||
/// Update battery status from iOS UIDevice.batteryLevel
|
||||
///
|
||||
/// # iOS Integration Example
|
||||
///
|
||||
/// ```swift
|
||||
/// // In your iOS app code:
|
||||
/// UIDevice.current.isBatteryMonitoringEnabled = true
|
||||
/// let batteryLevel = UIDevice.current.batteryLevel // Returns 0.0 to 1.0
|
||||
/// let isCharging = UIDevice.current.batteryState == .charging ||
|
||||
/// UIDevice.current.batteryState == .full
|
||||
/// let isLowPowerMode = ProcessInfo.processInfo.isLowPowerModeEnabled
|
||||
///
|
||||
/// // Update Bevy resource (this is pseudocode - actual implementation depends on your bridge)
|
||||
/// battery_status.update_from_ios(batteryLevel, isCharging, isLowPowerMode);
|
||||
/// ```
|
||||
pub fn update_from_ios(&mut self, level: f32, is_charging: bool, is_low_power_mode: bool) {
|
||||
self.level = level.clamp(0.0, 1.0);
|
||||
self.is_charging = is_charging;
|
||||
self.is_low_power_mode = is_low_power_mode;
|
||||
}
|
||||
|
||||
/// Check if the device is in a battery-critical state
|
||||
///
|
||||
/// Returns true if battery is low (<20%) and not charging, or low power
|
||||
/// mode is enabled.
|
||||
pub fn is_battery_critical(&self) -> bool {
|
||||
(self.level < 0.2 && !self.is_charging) || self.is_low_power_mode
|
||||
}
|
||||
}
|
||||
|
||||
/// Session state tracking for crash detection
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionState {
    /// Random UUID (string form) identifying this run of the application.
    pub session_id: String,
    /// When this session began (UTC).
    pub started_at: DateTime<Utc>,
    /// Starts false; a true value persisted to the database indicates the
    /// previous session exited through the shutdown path.
    pub clean_shutdown: bool,
}
|
||||
|
||||
impl SessionState {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
session_id: uuid::Uuid::new_v4().to_string(),
|
||||
started_at: Utc::now(),
|
||||
clean_shutdown: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SessionState {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_dirty_entities_tracking() {
|
||||
let mut dirty = DirtyEntities::new();
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
dirty.mark_dirty(entity_id, "Transform");
|
||||
assert!(dirty.is_dirty(&entity_id));
|
||||
assert_eq!(dirty.count(), 1);
|
||||
|
||||
dirty.clear();
|
||||
assert!(!dirty.is_dirty(&entity_id));
|
||||
assert_eq!(dirty.count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_buffer_coalescing() -> Result<(), crate::persistence::PersistenceError> {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add first version
|
||||
buffer.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Transform".to_string(),
|
||||
data: vec![1, 2, 3],
|
||||
})?;
|
||||
assert_eq!(buffer.len(), 1);
|
||||
|
||||
// Add second version (should replace first)
|
||||
buffer.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Transform".to_string(),
|
||||
data: vec![4, 5, 6],
|
||||
})?;
|
||||
assert_eq!(buffer.len(), 1);
|
||||
|
||||
// Verify only latest version exists
|
||||
let ops = buffer.take_operations();
|
||||
assert_eq!(ops.len(), 1);
|
||||
if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
|
||||
assert_eq!(data, &vec![4, 5, 6]);
|
||||
} else {
|
||||
panic!("Expected UpsertComponent");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_buffer_different_components() {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add Transform
|
||||
buffer
|
||||
.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Transform".to_string(),
|
||||
data: vec![1, 2, 3],
|
||||
})
|
||||
.expect("Should successfully add Transform");
|
||||
|
||||
// Add Velocity (different component, should not coalesce)
|
||||
buffer
|
||||
.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Velocity".to_string(),
|
||||
data: vec![4, 5, 6],
|
||||
})
|
||||
.expect("Should successfully add Velocity");
|
||||
|
||||
assert_eq!(buffer.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flush_priority_immediate() {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add operation with immediate priority
|
||||
buffer
|
||||
.add_with_priority(
|
||||
PersistenceOp::UpsertEntity {
|
||||
id: entity_id,
|
||||
data: EntityData {
|
||||
id: entity_id,
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
entity_type: "TestEntity".to_string(),
|
||||
},
|
||||
},
|
||||
FlushPriority::Immediate,
|
||||
)
|
||||
.expect("Should successfully add entity with immediate priority");
|
||||
|
||||
// Should flush immediately regardless of interval
|
||||
assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
|
||||
assert_eq!(buffer.highest_priority, FlushPriority::Immediate);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flush_priority_critical_deadline() -> Result<(), crate::persistence::PersistenceError> {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add operation with critical priority
|
||||
buffer.add_with_priority(
|
||||
PersistenceOp::UpsertEntity {
|
||||
id: entity_id,
|
||||
data: EntityData {
|
||||
id: entity_id,
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
entity_type: "TestEntity".to_string(),
|
||||
},
|
||||
},
|
||||
FlushPriority::Critical,
|
||||
)?;
|
||||
|
||||
assert_eq!(buffer.highest_priority, FlushPriority::Critical);
|
||||
assert!(buffer.first_critical_time.is_some());
|
||||
|
||||
// Should not flush immediately
|
||||
assert!(!buffer.should_flush(std::time::Duration::from_secs(100)));
|
||||
|
||||
// Simulate deadline passing by manually setting the time
|
||||
buffer.first_critical_time = Some(
|
||||
Instant::now() - std::time::Duration::from_millis(CRITICAL_FLUSH_DEADLINE_MS + 100),
|
||||
);
|
||||
|
||||
// Now should flush due to deadline
|
||||
assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flush_priority_normal() -> Result<(), crate::persistence::PersistenceError> {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add normal priority operation
|
||||
buffer.add(PersistenceOp::UpsertEntity {
|
||||
id: entity_id,
|
||||
data: EntityData {
|
||||
id: entity_id,
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
entity_type: "TestEntity".to_string(),
|
||||
},
|
||||
})?;
|
||||
|
||||
assert_eq!(buffer.highest_priority, FlushPriority::Normal);
|
||||
assert!(buffer.first_critical_time.is_none());
|
||||
|
||||
// Should not flush before interval
|
||||
assert!(!buffer.should_flush(std::time::Duration::from_secs(100)));
|
||||
|
||||
// Set last flush to past
|
||||
buffer.last_flush = Instant::now() - std::time::Duration::from_secs(200);
|
||||
|
||||
// Now should flush
|
||||
assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_reset_on_take() -> Result<(), crate::persistence::PersistenceError> {
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add critical operation
|
||||
buffer.add_with_priority(
|
||||
PersistenceOp::UpsertEntity {
|
||||
id: entity_id,
|
||||
data: EntityData {
|
||||
id: entity_id,
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
entity_type: "TestEntity".to_string(),
|
||||
},
|
||||
},
|
||||
FlushPriority::Critical,
|
||||
)?;
|
||||
|
||||
assert_eq!(buffer.highest_priority, FlushPriority::Critical);
|
||||
assert!(buffer.first_critical_time.is_some());
|
||||
|
||||
// Take operations
|
||||
let ops = buffer.take_operations();
|
||||
assert_eq!(ops.len(), 1);
|
||||
|
||||
// Priority should be reset
|
||||
assert_eq!(buffer.highest_priority, FlushPriority::Normal);
|
||||
assert!(buffer.first_critical_time.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_priority_for_crdt_ops() {
|
||||
let node_id = NodeId::new_v4();
|
||||
|
||||
let log_op = PersistenceOp::LogOperation {
|
||||
node_id,
|
||||
sequence: 1,
|
||||
operation: vec![1, 2, 3],
|
||||
};
|
||||
|
||||
let vector_clock_op = PersistenceOp::UpdateVectorClock {
|
||||
node_id,
|
||||
counter: 42,
|
||||
};
|
||||
|
||||
let entity_op = PersistenceOp::UpsertEntity {
|
||||
id: EntityId::new_v4(),
|
||||
data: EntityData {
|
||||
id: EntityId::new_v4(),
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
entity_type: "TestEntity".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
// CRDT operations should have Critical priority
|
||||
assert_eq!(log_op.default_priority(), FlushPriority::Critical);
|
||||
assert_eq!(vector_clock_op.default_priority(), FlushPriority::Critical);
|
||||
|
||||
// Other operations should have Normal priority
|
||||
assert_eq!(entity_op.default_priority(), FlushPriority::Normal);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_consistency_after_operations() -> Result<(), crate::persistence::PersistenceError>
|
||||
{
|
||||
let mut buffer = WriteBuffer::new(100);
|
||||
let entity_id = EntityId::new_v4();
|
||||
|
||||
// Add component multiple times - should only keep latest
|
||||
for i in 0..10 {
|
||||
buffer.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Transform".to_string(),
|
||||
data: vec![i],
|
||||
})?;
|
||||
}
|
||||
|
||||
// Buffer should only have 1 operation (latest)
|
||||
assert_eq!(buffer.len(), 1);
|
||||
|
||||
// Verify it's the latest data
|
||||
let ops = buffer.take_operations();
|
||||
assert_eq!(ops.len(), 1);
|
||||
if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
|
||||
assert_eq!(data, &vec![9]);
|
||||
} else {
|
||||
panic!("Expected UpsertComponent");
|
||||
}
|
||||
|
||||
// After take, indices should be cleared and we can reuse
|
||||
buffer.add(PersistenceOp::UpsertComponent {
|
||||
entity_id,
|
||||
component_type: "Transform".to_string(),
|
||||
data: vec![100],
|
||||
})?;
|
||||
|
||||
assert_eq!(buffer.len(), 1);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
fn test_index_handles_multiple_entities() -> Result<(), crate::persistence::PersistenceError> {
    let mut buffer = WriteBuffer::new(100);
    let entity1 = EntityId::new_v4();
    let entity2 = EntityId::new_v4();

    // Builds a "Transform" upsert aimed at the given entity.
    let upsert = |target: EntityId, payload: Vec<u8>| PersistenceOp::UpsertComponent {
        entity_id: target,
        component_type: "Transform".to_string(),
        data: payload,
    };

    // Same component type on two distinct entities: both must be retained.
    buffer.add(upsert(entity1, vec![1]))?;
    buffer.add(upsert(entity2, vec![2]))?;
    assert_eq!(buffer.len(), 2);

    // Re-upserting entity1 replaces its slot in place, so the count stays 2.
    buffer.add(upsert(entity1, vec![3]))?;
    assert_eq!(buffer.len(), 2);

    Ok(())
}
|
||||
|
||||
#[test]
fn test_add_with_default_priority() {
    let mut buffer = WriteBuffer::new(100);

    // A CRDT log append submitted through the default-priority path...
    let log_append = PersistenceOp::LogOperation {
        node_id: NodeId::new_v4(),
        sequence: 1,
        operation: vec![1, 2, 3],
    };
    buffer.add_with_default_priority(log_append).unwrap();

    // ...must escalate the buffer to Critical and record when the first
    // critical operation arrived.
    assert_eq!(buffer.highest_priority, FlushPriority::Critical);
    assert!(buffer.first_critical_time.is_some());
}
|
||||
|
||||
#[test]
fn test_oversized_component_returns_error() {
    let mut buffer = WriteBuffer::new(100);

    // A payload one megabyte past the 10MB cap must be rejected gracefully.
    let outcome = buffer.add(PersistenceOp::UpsertComponent {
        entity_id: EntityId::new_v4(),
        component_type: "HugeComponent".to_string(),
        data: vec![0u8; 11 * 1024 * 1024],
    });

    // The rejection must be a typed error describing exactly what
    // overflowed — never a panic.
    match outcome {
        Err(crate::persistence::PersistenceError::ComponentTooLarge {
            component_type,
            size_bytes,
            max_bytes,
        }) => {
            assert_eq!(component_type, "HugeComponent");
            assert_eq!(size_bytes, 11 * 1024 * 1024);
            assert_eq!(max_bytes, MAX_COMPONENT_SIZE_BYTES);
        },
        _ => panic!("Expected ComponentTooLarge error"),
    }

    // The failed add must leave the buffer untouched.
    assert_eq!(buffer.len(), 0);
}
|
||||
|
||||
#[test]
fn test_max_size_component_succeeds() {
    let mut buffer = WriteBuffer::new(100);

    // Exactly 10MB sits on the boundary and must still be accepted.
    let outcome = buffer.add(PersistenceOp::UpsertComponent {
        entity_id: EntityId::new_v4(),
        component_type: "MaxComponent".to_string(),
        data: vec![0u8; 10 * 1024 * 1024],
    });

    assert!(outcome.is_ok());
    assert_eq!(buffer.len(), 1);
}
|
||||
|
||||
#[test]
fn test_oversized_operation_returns_error() {
    let mut buffer = WriteBuffer::new(100);

    // An 11MB CRDT log payload exceeds the same 10MB cap as components.
    let outcome = buffer.add(PersistenceOp::LogOperation {
        node_id: uuid::Uuid::new_v4(),
        sequence: 1,
        operation: vec![0u8; 11 * 1024 * 1024],
    });

    assert!(outcome.is_err());

    // Oversized log entries are reported under the synthetic "Operation"
    // component type.
    if let Err(crate::persistence::PersistenceError::ComponentTooLarge { component_type, .. }) =
        outcome
    {
        assert_eq!(component_type, "Operation");
    } else {
        panic!("Expected ComponentTooLarge error for Operation");
    }
}
|
||||
|
||||
#[test]
fn test_write_buffer_never_panics_property() {
    // Probe sizes around the 10MB boundary plus degenerate and huge cases;
    // the buffer must answer Ok/Err for all of them without ever panicking.
    let sizes = [
        0,
        1000,
        1_000_000,
        5_000_000,
        10_000_000, // Exactly at limit
        10_000_001, // Just over limit
        11_000_000,
        100_000_000,
    ];

    for size in sizes {
        let mut buffer = WriteBuffer::new(100);
        let outcome = buffer.add(PersistenceOp::UpsertComponent {
            entity_id: uuid::Uuid::new_v4(),
            component_type: "TestComponent".to_string(),
            data: vec![0u8; size],
        });

        // Acceptance must agree exactly with the size limit.
        let within_limit = size <= MAX_COMPONENT_SIZE_BYTES;
        match outcome {
            Ok(_) => assert!(within_limit, "Size {} should have failed", size),
            Err(_) => assert!(!within_limit, "Size {} should have succeeded", size),
        }
    }
}
|
||||
}
|
||||
90
crates/libmarathon/src/platform/desktop/event_loop.rs
Normal file
90
crates/libmarathon/src/platform/desktop/event_loop.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
//! Desktop event loop - owns winit window and event handling
|
||||
//!
|
||||
//! This module creates and manages the main window and event loop.
|
||||
//! It converts winit events to InputEvents and provides them to the engine.
|
||||
|
||||
use super::winit_bridge;
|
||||
use winit::application::ApplicationHandler;
|
||||
use winit::event::WindowEvent;
|
||||
use winit::event_loop::{ActiveEventLoop, ControlFlow, EventLoop};
|
||||
use winit::window::{Window, WindowId};
|
||||
|
||||
/// Main event loop runner for desktop platforms
///
/// Implements winit's `ApplicationHandler`. The window is created lazily in
/// `resumed` (winit only allows window creation once the event loop is
/// active), so `window` stays `None` until then.
pub struct DesktopApp {
    // `None` until the first `resumed` callback successfully creates the window.
    window: Option<Window>,
}
|
||||
|
||||
impl DesktopApp {
    /// Create a handler with no window yet; the window itself is created
    /// when the event loop delivers `resumed`.
    pub fn new() -> Self {
        Self { window: None }
    }
}

// `Default` mirrors `new` (clippy: `new_without_default`), so the type can
// be used with `Default`-based construction without changing any caller.
impl Default for DesktopApp {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl ApplicationHandler for DesktopApp {
    /// Called when the event loop becomes active; creates the window on the
    /// first invocation only (later resumes keep the existing window).
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        if self.window.is_none() {
            let window_attributes = Window::default_attributes()
                .with_title("Marathon")
                .with_inner_size(winit::dpi::LogicalSize::new(1280, 720));

            match event_loop.create_window(window_attributes) {
                Ok(window) => {
                    tracing::info!("Created winit window");
                    self.window = Some(window);
                }
                Err(e) => {
                    // Creation failure is logged but not fatal here; the
                    // event loop keeps running without a window.
                    tracing::error!("Failed to create window: {}", e);
                }
            }
        }
    }

    /// Handle a per-window event: forward it to the input bridge, then
    /// react to close and redraw requests.
    fn window_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        _window_id: WindowId,
        event: WindowEvent,
    ) {
        // Forward all input events to the bridge first
        // (the bridge filters for the event kinds it understands).
        winit_bridge::push_window_event(&event);

        match event {
            WindowEvent::CloseRequested => {
                tracing::info!("Window close requested");
                event_loop.exit();
            }

            WindowEvent::RedrawRequested => {
                // Rendering happens via Bevy; we only keep the redraw loop
                // alive by immediately scheduling the next frame.
                if let Some(window) = &self.window {
                    window.request_redraw();
                }
            }

            _ => {}
        }
    }

    /// Runs once per event-loop iteration; with `ControlFlow::Poll` this
    /// keeps a continuous redraw cycle going.
    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
        // Request redraw for next frame
        if let Some(window) = &self.window {
            window.request_redraw();
        }
    }
}
|
||||
|
||||
/// Run the desktop application with the provided game update function
|
||||
///
|
||||
/// This takes ownership of the main thread and runs the winit event loop.
|
||||
/// The update_fn is called each frame to update game logic.
|
||||
pub fn run(mut update_fn: impl FnMut() + 'static) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let event_loop = EventLoop::new()?;
|
||||
event_loop.set_control_flow(ControlFlow::Poll); // Run as fast as possible
|
||||
|
||||
let mut app = DesktopApp::new();
|
||||
|
||||
// Run the event loop, calling update_fn each frame
|
||||
event_loop.run_app(&mut app)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
9
crates/libmarathon/src/platform/desktop/mod.rs
Normal file
9
crates/libmarathon/src/platform/desktop/mod.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
//! Desktop platform integration
|
||||
//!
|
||||
//! Owns the winit event loop and converts winit events to InputEvents.
|
||||
|
||||
mod event_loop;
|
||||
mod winit_bridge;
|
||||
|
||||
pub use event_loop::run;
|
||||
pub use winit_bridge::{drain_as_input_events, push_window_event};
|
||||
225
crates/libmarathon/src/platform/desktop/winit_bridge.rs
Normal file
225
crates/libmarathon/src/platform/desktop/winit_bridge.rs
Normal file
@@ -0,0 +1,225 @@
|
||||
//! Desktop winit event loop integration
|
||||
//!
|
||||
//! This module owns the winit event loop and window, converting winit events
|
||||
//! to engine-agnostic InputEvents.
|
||||
|
||||
use crate::engine::{InputEvent, KeyCode, Modifiers, MouseButton, TouchPhase};
|
||||
use glam::Vec2;
|
||||
use std::sync::Mutex;
|
||||
use winit::event::{ElementState, MouseButton as WinitMouseButton, MouseScrollDelta, WindowEvent};
|
||||
use winit::keyboard::PhysicalKey;
|
||||
|
||||
/// Raw winit input events before conversion
#[derive(Clone, Debug)]
pub enum RawWinitEvent {
    /// Button press/release with the cursor position captured at push time.
    MouseButton {
        button: MouseButton,
        state: ElementState,
        position: Vec2,
    },
    /// Cursor motion; only buffered while at least one button is held
    /// (see `push_window_event`), so this effectively represents a drag sample.
    CursorMoved {
        position: Vec2,
    },
    /// Physical key press/release with the modifier snapshot taken when the
    /// event was pushed.
    Keyboard {
        key: KeyCode,
        state: ElementState,
        modifiers: Modifiers,
    },
    /// Scroll delta (line deltas are pre-scaled by 20 in `push_window_event`)
    /// plus the cursor position at push time.
    MouseWheel {
        delta: Vec2,
        position: Vec2,
    },
}
|
||||
|
||||
/// Thread-safe buffer for winit events
///
/// The winit event loop pushes events here.
/// The engine drains them each frame.
static BUFFER: Mutex<Vec<RawWinitEvent>> = Mutex::new(Vec::new());

/// Current input state for tracking drags and modifiers
///
/// Updated by `push_window_event` and read again at drain time by
/// `raw_to_input_event` to attribute drag events to a button.
static INPUT_STATE: Mutex<InputState> = Mutex::new(InputState {
    left_pressed: false,
    right_pressed: false,
    middle_pressed: false,
    last_position: Vec2::ZERO,
    modifiers: Modifiers {
        shift: false,
        ctrl: false,
        alt: false,
        meta: false,
    },
});

/// Snapshot of the input state needed to synthesize drag events and to
/// stamp keyboard events with modifiers.
#[derive(Clone, Copy, Debug)]
struct InputState {
    left_pressed: bool,
    right_pressed: bool,
    middle_pressed: bool,
    // Most recent cursor position; used for events that winit delivers
    // without a position (button presses, wheel).
    last_position: Vec2,
    modifiers: Modifiers,
}
|
||||
|
||||
/// Push a winit window event to the buffer
///
/// Call this from the winit event loop. Only left/right/middle mouse
/// buttons, cursor motion while a button is held, physical keys, modifier
/// changes, and scroll-wheel events are buffered; everything else is ignored.
///
/// Lock ordering: INPUT_STATE is always acquired before BUFFER, in every
/// arm, so the two mutexes cannot deadlock against each other. If either
/// lock is poisoned the event is silently dropped.
pub fn push_window_event(event: &WindowEvent) {
    match event {
        WindowEvent::MouseInput { state, button, .. } => {
            let mouse_button = match button {
                WinitMouseButton::Left => MouseButton::Left,
                WinitMouseButton::Right => MouseButton::Right,
                WinitMouseButton::Middle => MouseButton::Middle,
                _ => return, // Ignore other buttons
            };

            if let Ok(mut input_state) = INPUT_STATE.lock() {
                // winit's MouseInput carries no position; use the last one
                // recorded by the CursorMoved arm.
                let position = input_state.last_position;

                // Update button state
                match mouse_button {
                    MouseButton::Left => input_state.left_pressed = *state == ElementState::Pressed,
                    MouseButton::Right => input_state.right_pressed = *state == ElementState::Pressed,
                    MouseButton::Middle => input_state.middle_pressed = *state == ElementState::Pressed,
                }

                if let Ok(mut buf) = BUFFER.lock() {
                    buf.push(RawWinitEvent::MouseButton {
                        button: mouse_button,
                        state: *state,
                        position,
                    });
                }
            }
        }

        WindowEvent::CursorMoved { position, .. } => {
            let pos = Vec2::new(position.x as f32, position.y as f32);

            if let Ok(mut input_state) = INPUT_STATE.lock() {
                input_state.last_position = pos;

                // Generate drag events for any pressed buttons
                // (plain hover motion with no button held is not buffered).
                if input_state.left_pressed || input_state.right_pressed || input_state.middle_pressed {
                    if let Ok(mut buf) = BUFFER.lock() {
                        buf.push(RawWinitEvent::CursorMoved { position: pos });
                    }
                }
            }
        }

        WindowEvent::KeyboardInput { event: key_event, .. } => {
            // Only handle physical keys
            if let PhysicalKey::Code(key_code) = key_event.physical_key {
                if let Ok(input_state) = INPUT_STATE.lock() {
                    if let Ok(mut buf) = BUFFER.lock() {
                        buf.push(RawWinitEvent::Keyboard {
                            key: key_code,
                            state: key_event.state,
                            // Snapshot of the modifier state maintained by
                            // the ModifiersChanged arm below.
                            modifiers: input_state.modifiers,
                        });
                    }
                }
            }
        }

        WindowEvent::ModifiersChanged(new_modifiers) => {
            // Not buffered as an event; only updates the shared snapshot
            // that keyboard events are stamped with.
            if let Ok(mut input_state) = INPUT_STATE.lock() {
                input_state.modifiers = Modifiers {
                    shift: new_modifiers.state().shift_key(),
                    ctrl: new_modifiers.state().control_key(),
                    alt: new_modifiers.state().alt_key(),
                    meta: new_modifiers.state().super_key(),
                };
            }
        }

        WindowEvent::MouseWheel { delta, .. } => {
            let scroll_delta = match delta {
                MouseScrollDelta::LineDelta(x, y) => Vec2::new(*x, *y) * 20.0, // Scale line deltas
                MouseScrollDelta::PixelDelta(pos) => Vec2::new(pos.x as f32, pos.y as f32),
            };

            if let Ok(input_state) = INPUT_STATE.lock() {
                if let Ok(mut buf) = BUFFER.lock() {
                    buf.push(RawWinitEvent::MouseWheel {
                        delta: scroll_delta,
                        position: input_state.last_position,
                    });
                }
            }
        }

        _ => {}
    }
}
|
||||
|
||||
/// Drain all buffered winit events and convert to InputEvents
|
||||
///
|
||||
/// Call this from your engine's input processing to consume events.
|
||||
pub fn drain_as_input_events() -> Vec<InputEvent> {
|
||||
BUFFER
|
||||
.lock()
|
||||
.ok()
|
||||
.map(|mut b| {
|
||||
std::mem::take(&mut *b)
|
||||
.into_iter()
|
||||
.filter_map(raw_to_input_event)
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Convert a raw winit event to an engine InputEvent
///
/// Returns `None` for events with no engine equivalent (e.g. a buffered
/// cursor move whose button can no longer be resolved).
fn raw_to_input_event(event: RawWinitEvent) -> Option<InputEvent> {
    match event {
        RawWinitEvent::MouseButton { button, state, position } => {
            // Pressed/released map onto the touch-style phase model.
            let phase = match state {
                ElementState::Pressed => TouchPhase::Started,
                ElementState::Released => TouchPhase::Ended,
            };

            Some(InputEvent::Mouse {
                pos: position,
                button,
                phase,
            })
        }

        RawWinitEvent::CursorMoved { position } => {
            // Determine which button is pressed for drag events.
            //
            // NOTE(review): this reads INPUT_STATE at *drain* time, not at
            // the time the move was pushed. If the button was released
            // between push and drain, the drag sample is dropped (or
            // attributed to another still-held button). Consider recording
            // the button inside RawWinitEvent::CursorMoved — confirm the
            // intended behavior.
            let input_state = INPUT_STATE.lock().ok()?;

            // Priority when several buttons are held: left, right, middle.
            let button = if input_state.left_pressed {
                MouseButton::Left
            } else if input_state.right_pressed {
                MouseButton::Right
            } else if input_state.middle_pressed {
                MouseButton::Middle
            } else {
                return None; // No button pressed, ignore
            };

            Some(InputEvent::Mouse {
                pos: position,
                button,
                phase: TouchPhase::Moved,
            })
        }

        RawWinitEvent::Keyboard { key, state, modifiers } => {
            Some(InputEvent::Keyboard {
                key,
                pressed: state == ElementState::Pressed,
                modifiers,
            })
        }

        RawWinitEvent::MouseWheel { delta, position } => {
            Some(InputEvent::MouseWheel {
                delta,
                pos: position,
            })
        }
    }
}
|
||||
10
crates/libmarathon/src/platform/ios/mod.rs
Normal file
10
crates/libmarathon/src/platform/ios/mod.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
//! iOS platform support
|
||||
//!
|
||||
//! This module contains iOS-specific input capture code.
|
||||
|
||||
pub mod pencil_bridge;
|
||||
|
||||
pub use pencil_bridge::{
|
||||
drain_as_input_events, drain_raw, pencil_point_received, swift_attach_pencil_capture,
|
||||
RawPencilPoint,
|
||||
};
|
||||
103
crates/libmarathon/src/platform/ios/pencil_bridge.rs
Normal file
103
crates/libmarathon/src/platform/ios/pencil_bridge.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
//! Apple Pencil input bridge for iOS
|
||||
//!
|
||||
//! This module captures raw Apple Pencil input via Swift/UIKit and converts
|
||||
//! it to engine-agnostic InputEvents.
|
||||
|
||||
use crate::engine::input_events::{InputEvent, TouchPhase};
|
||||
use glam::Vec2;
|
||||
use std::sync::Mutex;
|
||||
|
||||
/// Raw pencil point data from Swift UITouch
///
/// This matches the C struct defined in PencilBridge.h; the field order,
/// field types, and `#[repr(C)]` must stay in sync with that header or the
/// FFI call in `pencil_point_received` will read garbage.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)] // Use C memory layout so Swift can interop
pub struct RawPencilPoint {
    /// Screen X coordinate in points (not pixels)
    pub x: f32,
    /// Screen Y coordinate in points (not pixels)
    pub y: f32,
    /// Force/pressure (0.0 - 4.0 on Apple Pencil)
    pub force: f32,
    /// Altitude angle in radians (0 = flat, π/2 = perpendicular)
    pub altitude: f32,
    /// Azimuth angle in radians (rotation around vertical)
    pub azimuth: f32,
    /// iOS timestamp (seconds since system boot)
    pub timestamp: f64,
    /// Touch phase: 0=began, 1=moved, 2=ended; any other value is mapped to
    /// `TouchPhase::Cancelled` by `raw_to_input_event`.
    pub phase: u8,
}
|
||||
|
||||
/// Thread-safe buffer for pencil points
|
||||
///
|
||||
/// Swift's main thread pushes points here via C FFI.
|
||||
/// Bevy's Update schedule drains them each frame.
|
||||
static BUFFER: Mutex<Vec<RawPencilPoint>> = Mutex::new(Vec::new());
|
||||
|
||||
/// FFI function called from Swift when a pencil point is received
|
||||
///
|
||||
/// This is exposed as a C function so Swift can call it.
|
||||
/// The `#[no_mangle]` prevents Rust from changing the function name.
|
||||
#[no_mangle]
|
||||
pub extern "C" fn pencil_point_received(point: RawPencilPoint) {
|
||||
if let Ok(mut buf) = BUFFER.lock() {
|
||||
buf.push(point);
|
||||
}
|
||||
}
|
||||
|
||||
/// Drain all buffered pencil points and convert to InputEvents
|
||||
///
|
||||
/// Call this from your Bevy Update system to consume input.
|
||||
pub fn drain_as_input_events() -> Vec<InputEvent> {
|
||||
BUFFER
|
||||
.lock()
|
||||
.ok()
|
||||
.map(|mut b| {
|
||||
std::mem::take(&mut *b)
|
||||
.into_iter()
|
||||
.map(raw_to_input_event)
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Drain raw pencil points without conversion
|
||||
///
|
||||
/// Useful for debugging or custom processing.
|
||||
pub fn drain_raw() -> Vec<RawPencilPoint> {
|
||||
BUFFER
|
||||
.lock()
|
||||
.ok()
|
||||
.map(|mut b| std::mem::take(&mut *b))
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Convert a raw pencil point to an engine InputEvent
|
||||
fn raw_to_input_event(p: RawPencilPoint) -> InputEvent {
|
||||
InputEvent::Stylus {
|
||||
pos: Vec2::new(p.x, p.y),
|
||||
pressure: p.force,
|
||||
tilt: Vec2::new(p.altitude, p.azimuth),
|
||||
phase: match p.phase {
|
||||
0 => TouchPhase::Started,
|
||||
1 => TouchPhase::Moved,
|
||||
2 => TouchPhase::Ended,
|
||||
_ => TouchPhase::Cancelled,
|
||||
},
|
||||
timestamp: p.timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
/// Attach the pencil capture system to a UIView
///
/// This is only available on iOS. On other platforms, it's a no-op.
#[cfg(target_os = "ios")]
extern "C" {
    /// Implemented in Swift (PencilCapture.swift); installs a pencil
    /// gesture recognizer on the `UIView` behind the raw pointer.
    pub fn swift_attach_pencil_capture(view: *mut std::ffi::c_void);
}

/// No-op stand-in so callers compile unchanged on non-iOS targets.
///
/// # Safety
///
/// Kept `unsafe` to mirror the iOS extern declaration; the pointer is never
/// dereferenced here, so any value is accepted.
#[cfg(not(target_os = "ios"))]
pub unsafe fn swift_attach_pencil_capture(_: *mut std::ffi::c_void) {
    // No-op on non-iOS platforms
}
|
||||
43
crates/libmarathon/src/platform/ios/swift/PencilBridge.h
Normal file
43
crates/libmarathon/src/platform/ios/swift/PencilBridge.h
Normal file
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* C header for Rust-Swift interop
|
||||
*
|
||||
* This defines the interface between Rust and Swift.
|
||||
* Both sides include this header to ensure they agree on data types.
|
||||
*/
|
||||
|
||||
#ifndef PENCIL_BRIDGE_H
|
||||
#define PENCIL_BRIDGE_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/**
 * Raw pencil data from iOS UITouch
 *
 * This struct uses C types that both Rust and Swift understand.
 * The memory layout must match exactly on both sides: the Rust mirror is
 * `RawPencilPoint` in pencil_bridge.rs (declared #[repr(C)]), and any
 * change here must be made there as well.
 */
typedef struct {
    float x;          // Screen X in points
    float y;          // Screen Y in points
    float force;      // Pressure (0.0 - 4.0)
    float altitude;   // Angle from screen (radians)
    float azimuth;    // Rotation angle (radians)
    double timestamp; // iOS system timestamp
    uint8_t phase;    // 0=began, 1=moved, 2=ended
} RawPencilPoint;
|
||||
|
||||
/**
|
||||
* Called from Swift when a pencil point is captured
|
||||
*
|
||||
* This is implemented in Rust (pencil_bridge.rs)
|
||||
*/
|
||||
void pencil_point_received(RawPencilPoint point);
|
||||
|
||||
/**
|
||||
* Attach pencil capture to a UIView
|
||||
*
|
||||
* This is implemented in Swift (PencilCapture.swift)
|
||||
*/
|
||||
void swift_attach_pencil_capture(void* view);
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,52 @@
|
||||
import UIKit
|
||||
|
||||
/// C entry point called from Rust as `swift_attach_pencil_capture`.
///
/// Installs a `PencilGestureRecognizer` on the given view. The work is
/// dispatched to the main queue because UIKit views may only be mutated on
/// the main thread.
@_cdecl("swift_attach_pencil_capture")
func swiftAttachPencilCapture(_ viewPtr: UnsafeMutableRawPointer) {
    DispatchQueue.main.async {
        // takeUnretainedValue: borrow the view without consuming a retain —
        // the caller keeps ownership. Assumes the pointer stays valid until
        // this async block runs; TODO confirm with the Rust caller.
        let view = Unmanaged<UIView>.fromOpaque(viewPtr).takeUnretainedValue()
        let recognizer = PencilGestureRecognizer()
        // Don't swallow touches: the rest of the UI must still receive them.
        recognizer.cancelsTouchesInView = false
        recognizer.delaysTouchesEnded = false
        view.addGestureRecognizer(recognizer)
        print("[Swift] Pencil capture attached")
    }
}
|
||||
|
||||
/// Forwards Apple Pencil touches to Rust via `pencil_point_received`.
///
/// Finger touches are ignored; coalesced touches are expanded so the Rust
/// side receives every high-frequency pencil sample.
class PencilGestureRecognizer: UIGestureRecognizer {
    // Phase discriminants shared with the Rust converter:
    // 0=began, 1=moved, 2=ended; any other value maps to Cancelled there.

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .began
        send(touches, event: event, phase: 0)
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .changed
        send(touches, event: event, phase: 1)
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .ended
        send(touches, event: event, phase: 2)
    }

    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent) {
        state = .cancelled
        // Report cancellation distinctly (3) instead of reusing "ended" (2):
        // the Rust converter maps any phase outside 0-2 to
        // TouchPhase::Cancelled, so strokes interrupted by the system are no
        // longer indistinguishable from completed ones.
        send(touches, event: event, phase: 3)
    }

    /// Push every (coalesced) pencil touch in `touches` across the FFI.
    private func send(_ touches: Set<UITouch>, event: UIEvent?, phase: UInt8) {
        for touch in touches where touch.type == .pencil {
            // coalescedTouches yields the intermediate samples UIKit batched
            // into this event; fall back to the touch itself when absent.
            for t in event?.coalescedTouches(for: touch) ?? [touch] {
                let loc = t.preciseLocation(in: view)
                pencil_point_received(RawPencilPoint(
                    x: Float(loc.x),
                    y: Float(loc.y),
                    force: Float(t.force),
                    altitude: Float(t.altitudeAngle),
                    azimuth: Float(t.azimuthAngle(in: view)),
                    timestamp: t.timestamp,
                    phase: phase
                ))
            }
        }
    }
}
|
||||
10
crates/libmarathon/src/platform/mod.rs
Normal file
10
crates/libmarathon/src/platform/mod.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
//! Platform-specific input bridges
|
||||
//!
|
||||
//! This module contains platform-specific code for capturing input
|
||||
//! and converting it to engine-agnostic InputEvents.
|
||||
|
||||
#[cfg(target_os = "ios")]
|
||||
pub mod ios;
|
||||
|
||||
#[cfg(not(target_os = "ios"))]
|
||||
pub mod desktop;
|
||||
225
crates/libmarathon/src/sync.rs
Normal file
225
crates/libmarathon/src/sync.rs
Normal file
@@ -0,0 +1,225 @@
|
||||
use std::ops::Deref;
|
||||
|
||||
use chrono::{
|
||||
DateTime,
|
||||
Utc,
|
||||
};
|
||||
// Re-export common CRDT types from the crdts library
|
||||
pub use crdts::{
|
||||
CmRDT,
|
||||
CvRDT,
|
||||
ctx::ReadCtx,
|
||||
lwwreg::LWWReg,
|
||||
map::Map,
|
||||
orswot::Orswot,
|
||||
};
|
||||
use serde::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
// Re-export the Synced derive macro
|
||||
pub use sync_macros::Synced;
|
||||
|
||||
pub type NodeId = uuid::Uuid;
|
||||
|
||||
/// Transparent wrapper for synced values
///
/// This wraps any value with LWW (last-writer-wins) semantics but allows
/// you to use it like a normal value via `Deref`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncedValue<T: Clone> {
    // Current winning value.
    value: T,
    // Wall-clock time of the write that produced `value`; newer wins.
    timestamp: DateTime<Utc>,
    // Writer's node id; breaks ties when timestamps are equal (higher wins).
    node_id: NodeId,
}
|
||||
|
||||
impl<T: Clone> SyncedValue<T> {
|
||||
pub fn new(value: T, node_id: NodeId) -> Self {
|
||||
Self {
|
||||
value,
|
||||
timestamp: Utc::now(),
|
||||
node_id,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&self) -> &T {
|
||||
&self.value
|
||||
}
|
||||
|
||||
pub fn set(&mut self, value: T, node_id: NodeId) {
|
||||
self.value = value;
|
||||
self.timestamp = Utc::now();
|
||||
self.node_id = node_id;
|
||||
}
|
||||
|
||||
pub fn apply_lww(&mut self, value: T, timestamp: DateTime<Utc>, node_id: NodeId) {
|
||||
if timestamp > self.timestamp || (timestamp == self.timestamp && node_id > self.node_id) {
|
||||
self.value = value;
|
||||
self.timestamp = timestamp;
|
||||
self.node_id = node_id;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn merge(&mut self, other: &Self) {
|
||||
// Only clone if we're actually going to use the values (when other is newer)
|
||||
if other.timestamp > self.timestamp ||
|
||||
(other.timestamp == self.timestamp && other.node_id > self.node_id)
|
||||
{
|
||||
self.value = other.value.clone();
|
||||
self.timestamp = other.timestamp;
|
||||
self.node_id = other.node_id; // UUID is Copy, no need to clone
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow transparent read-only access to the inner value
// Note: DerefMut is intentionally NOT implemented to preserve LWW semantics
// Use `.set()` method to update values, which properly updates timestamps
impl<T: Clone> Deref for SyncedValue<T> {
    type Target = T;

    // Borrows the current winning value, equivalent to `get()`.
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
|
||||
|
||||
/// Wrapper for a sync message that goes over gossip
///
/// Serialized as JSON by `to_bytes`/`from_bytes`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncMessage<T> {
    /// Unique message ID (node id + millisecond timestamp + per-process counter)
    pub message_id: String,
    /// Node that sent this
    pub node_id: NodeId,
    /// When it was sent
    pub timestamp: DateTime<Utc>,
    /// The actual sync operation
    pub operation: T,
}
|
||||
|
||||
impl<T: Serialize> SyncMessage<T> {
|
||||
pub fn new(node_id: NodeId, operation: T) -> Self {
|
||||
use std::sync::atomic::{
|
||||
AtomicU64,
|
||||
Ordering,
|
||||
};
|
||||
static COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
let seq = COUNTER.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
Self {
|
||||
message_id: format!("{}-{}-{}", node_id, Utc::now().timestamp_millis(), seq),
|
||||
node_id,
|
||||
timestamp: Utc::now(),
|
||||
operation,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
|
||||
Ok(serde_json::to_vec(self)?)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: for<'de> Deserialize<'de>> SyncMessage<T> {
|
||||
pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
|
||||
Ok(serde_json::from_slice(bytes)?)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper trait for types that can be synced
pub trait Syncable: Sized {
    /// Wire-format operation type; must round-trip through serde.
    type Operation: Serialize + for<'de> Deserialize<'de> + Clone;

    /// Apply a sync operation to this value
    fn apply_sync_op(&mut self, op: &Self::Operation);

    /// Get the node ID for this instance
    fn node_id(&self) -> &NodeId;

    /// Create a sync message for an operation
    ///
    /// Default implementation stamps the message with this instance's node
    /// id; implementors normally don't need to override it.
    fn create_sync_message(&self, op: Self::Operation) -> SyncMessage<Self::Operation> {
        SyncMessage::new(*self.node_id(), op) // UUID is Copy, dereference instead of clone
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
fn test_synced_value() {
    let local_node = uuid::Uuid::new_v4();

    // Construction and local writes behave like a plain cell.
    let mut cell = SyncedValue::new(42, local_node);
    assert_eq!(*cell.get(), 42);
    cell.set(100, local_node);
    assert_eq!(*cell.get(), 100);

    // A remote write stamped 10 seconds in the past must lose to the newer
    // local write under LWW.
    let remote_node = uuid::Uuid::new_v4();
    let stale = Utc::now() - chrono::Duration::seconds(10);
    cell.apply_lww(50, stale, remote_node);
    assert_eq!(*cell.get(), 100);
}
|
||||
|
||||
#[test]
fn test_sync_message() {
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct TestOp {
        value: i32,
    }

    // A round-trip through the wire format must preserve sender and payload.
    let sender = uuid::Uuid::new_v4();
    let msg = SyncMessage::new(sender, TestOp { value: 42 });

    let wire = msg.to_bytes().unwrap();
    let decoded: SyncMessage<TestOp> = SyncMessage::from_bytes(&wire).unwrap();

    assert_eq!(decoded.node_id, sender);
    assert_eq!(decoded.operation.value, 42);
}
|
||||
|
||||
#[test]
fn test_uuid_comparison() {
    // Sanity-check the assumption behind the LWW tiebreaker: UUIDs built
    // from larger u128s compare as larger.
    let node1 = uuid::Uuid::from_u128(1);
    let node2 = uuid::Uuid::from_u128(2);

    println!("node1: {}", node1);
    println!("node2: {}", node2);
    println!("node2 > node1: {}", node2 > node1);

    assert!(node2 > node1, "UUID from_u128(2) should be > from_u128(1)");
}
|
||||
|
||||
#[test]
fn test_lww_tiebreaker() {
    // from_u128 gives a known ordering: node2 > node1.
    let node1 = uuid::Uuid::from_u128(1);
    let node2 = uuid::Uuid::from_u128(2);

    // Create SyncedValue FIRST, then capture a timestamp that's guaranteed to be
    // newer
    let mut lww = SyncedValue::new(100, node1);
    std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure ts is after init
    let ts = Utc::now();

    // Apply update from node1 at timestamp ts
    lww.apply_lww(100, ts, node1);
    println!(
        "After node1 update: value={}, ts={:?}, node={}",
        lww.get(),
        lww.timestamp,
        lww.node_id
    );

    // Apply conflicting update from node2 at SAME timestamp
    // (equal timestamps exercise the node-id tiebreaker path).
    lww.apply_lww(200, ts, node2);
    println!(
        "After node2 update: value={}, ts={:?}, node={}",
        lww.get(),
        lww.timestamp,
        lww.node_id
    );

    // node2 > node1, so value2 should win
    assert_eq!(*lww.get(), 200, "Higher node_id should win tiebreaker");
}
|
||||
}
|
||||
Reference in New Issue
Block a user