initial architectural overhaul

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-13 22:22:05 +00:00
parent b098a19d6b
commit 5cb258fe6b
99 changed files with 4137 additions and 311 deletions

View File

@@ -0,0 +1,253 @@
//! Configuration for the persistence layer
use std::time::Duration;
use serde::{
Deserialize,
Serialize,
};
use crate::persistence::error::Result;
/// Default critical flush delay in milliseconds
const DEFAULT_CRITICAL_FLUSH_DELAY_MS: u64 = 1000;
/// Default maximum buffer operations before forced flush
const DEFAULT_MAX_BUFFER_OPERATIONS: usize = 1000;
/// Configuration for the persistence layer
///
/// Tunable knobs for flush cadence, WAL checkpointing, write buffering, and
/// battery-aware behaviour. Registered as a Bevy `Resource` so systems can
/// read it at runtime; the serde derives allow loading it from TOML via
/// `load_config_from_str` / `load_config_from_file`.
#[derive(Debug, Clone, Serialize, Deserialize, bevy::prelude::Resource)]
pub struct PersistenceConfig {
    /// Base flush interval (may be adjusted by battery level)
    pub flush_interval_secs: u64,
    /// Max time to defer critical writes (entity creation, etc.)
    pub critical_flush_delay_ms: u64,
    /// WAL checkpoint interval
    pub checkpoint_interval_secs: u64,
    /// Max WAL size before forced checkpoint (in bytes)
    pub max_wal_size_bytes: usize,
    /// Maximum number of operations in write buffer before forcing flush
    pub max_buffer_operations: usize,
    /// Enable adaptive flushing based on battery
    pub battery_adaptive: bool,
    /// Battery tier configuration
    pub battery_tiers: BatteryTiers,
    /// Platform-specific settings (defaults apply when absent from TOML)
    #[serde(default)]
    pub platform: PlatformConfig,
}
/// Mobile-friendly defaults: 10s flushes, 30s checkpoints, a 5 MB WAL cap,
/// and battery-adaptive flushing enabled.
impl Default for PersistenceConfig {
    fn default() -> Self {
        // 5 MB expressed in bytes.
        let wal_cap = 5 * 1024 * 1024;
        Self {
            flush_interval_secs: 10,
            checkpoint_interval_secs: 30,
            critical_flush_delay_ms: DEFAULT_CRITICAL_FLUSH_DELAY_MS,
            max_wal_size_bytes: wal_cap,
            max_buffer_operations: DEFAULT_MAX_BUFFER_OPERATIONS,
            battery_adaptive: true,
            battery_tiers: BatteryTiers::default(),
            platform: PlatformConfig::default(),
        }
    }
}
impl PersistenceConfig {
    /// Choose the flush interval for the current power state.
    ///
    /// With `battery_adaptive` disabled this is always
    /// `flush_interval_secs`. Otherwise the tier is picked from the power
    /// state: charging, level > 50%, level 20-50%, or below 20%.
    pub fn get_flush_interval(&self, battery_level: f32, is_charging: bool) -> Duration {
        let secs = if !self.battery_adaptive {
            self.flush_interval_secs
        } else if is_charging {
            self.battery_tiers.charging
        } else if battery_level > 0.5 {
            self.battery_tiers.high
        } else if battery_level > 0.2 {
            self.battery_tiers.medium
        } else {
            self.battery_tiers.low
        };
        Duration::from_secs(secs)
    }

    /// Maximum time a critical write may be deferred before flushing.
    pub fn get_critical_flush_delay(&self) -> Duration {
        Duration::from_millis(self.critical_flush_delay_ms)
    }

    /// Interval between WAL checkpoints.
    pub fn get_checkpoint_interval(&self) -> Duration {
        Duration::from_secs(self.checkpoint_interval_secs)
    }
}
/// Battery tier flush intervals (in seconds)
///
/// Consumed by [`PersistenceConfig::get_flush_interval`]; one interval per
/// power state, ordered from most to least generous.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatteryTiers {
    /// Flush interval when charging
    pub charging: u64,
    /// Flush interval when battery > 50%
    pub high: u64,
    /// Flush interval when battery 20-50%
    pub medium: u64,
    /// Flush interval when battery < 20%
    pub low: u64,
}
impl Default for BatteryTiers {
fn default() -> Self {
Self {
charging: 5,
high: 10,
medium: 30,
low: 60,
}
}
}
/// Platform-specific configuration
///
/// Container for per-OS tuning; currently only iOS has dedicated settings.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PlatformConfig {
    /// iOS-specific settings (defaults apply when absent from TOML)
    #[serde(default)]
    pub ios: IosConfig,
}
/// iOS-specific configuration
///
/// Controls behaviour under iOS lifecycle constraints (backgrounding and
/// Low Power Mode).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IosConfig {
    /// How long to wait for background flush before giving up (seconds)
    pub background_flush_timeout_secs: u64,
    /// Flush interval when in low power mode (seconds)
    pub low_power_mode_interval_secs: u64,
}
impl Default for IosConfig {
fn default() -> Self {
Self {
background_flush_timeout_secs: 5,
low_power_mode_interval_secs: 60,
}
}
}
/// Load persistence configuration from a TOML string
///
/// Parses TOML configuration and validates all settings. Use this for
/// loading configuration from embedded strings or dynamic sources.
///
/// # Parameters
/// - `toml`: TOML-formatted configuration string
///
/// # Returns
/// - `Ok(PersistenceConfig)`: Parsed and validated configuration
/// - `Err`: If TOML is invalid or contains invalid values
///
/// # Example TOML
/// ```toml
/// flush_interval_secs = 10
/// battery_adaptive = true
/// [battery_tiers]
/// charging = 5
/// high = 10
/// ```
pub fn load_config_from_str(toml: &str) -> Result<PersistenceConfig> {
Ok(toml::from_str(toml)?)
}
/// Load persistence configuration from a TOML file
///
/// Reads and parses a TOML configuration file. This is the recommended way
/// to load configuration for production use, allowing runtime configuration
/// changes without recompilation.
///
/// # Parameters
/// - `path`: Path to TOML configuration file
///
/// # Returns
/// - `Ok(PersistenceConfig)`: Loaded configuration
/// - `Err`: If the file can't be read or the TOML is invalid
///
/// # Examples
/// ```no_run
/// # use libmarathon::persistence::*;
/// # fn example() -> Result<()> {
/// let config = load_config_from_file("persistence.toml")?;
/// # Ok(())
/// # }
/// ```
pub fn load_config_from_file(path: impl AsRef<std::path::Path>) -> Result<PersistenceConfig> {
    let content = std::fs::read_to_string(path)?;
    // The helper already returns our Result type; no need to re-wrap it
    // (fixes clippy::needless_question_mark's `Ok(expr?)` pattern).
    load_config_from_str(&content)
}
/// Serialize persistence configuration to a TOML string
///
/// Renders the configuration as human-readable, pretty-printed TOML for
/// writing to disk or displaying current settings.
///
/// # Parameters
/// - `config`: Configuration to serialize
///
/// # Returns
/// - `Ok(String)`: Pretty-printed TOML configuration
/// - `Err`: If serialization fails (rare)
pub fn save_config_to_str(config: &PersistenceConfig) -> Result<String> {
    let rendered = toml::to_string_pretty(config)?;
    Ok(rendered)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Defaults must match the documented mobile-friendly values.
    #[test]
    fn test_default_config() {
        let config = PersistenceConfig::default();
        assert_eq!(config.flush_interval_secs, 10);
        // Idiomatic boolean assertion (clippy::bool_assert_comparison).
        assert!(config.battery_adaptive);
    }

    /// Each battery tier must map to its configured interval.
    #[test]
    fn test_battery_adaptive_intervals() {
        let config = PersistenceConfig::default();
        // Charging always wins regardless of level.
        let interval = config.get_flush_interval(0.3, true);
        assert_eq!(interval, Duration::from_secs(5));
        // High battery (> 50%).
        let interval = config.get_flush_interval(0.8, false);
        assert_eq!(interval, Duration::from_secs(10));
        // Medium battery (20-50%).
        let interval = config.get_flush_interval(0.4, false);
        assert_eq!(interval, Duration::from_secs(30));
        // Low battery (< 20%).
        let interval = config.get_flush_interval(0.1, false);
        assert_eq!(interval, Duration::from_secs(60));
    }

    /// A config must survive a TOML round-trip unchanged.
    #[test]
    fn test_config_serialization() {
        let config = PersistenceConfig::default();
        let toml = save_config_to_str(&config).unwrap();
        let loaded = load_config_from_str(&toml).unwrap();
        assert_eq!(config.flush_interval_secs, loaded.flush_interval_secs);
        assert_eq!(config.battery_adaptive, loaded.battery_adaptive);
    }
}

View File

@@ -0,0 +1,716 @@
//! Database schema and operations for persistence layer
use std::path::Path;
use chrono::Utc;
use rusqlite::{
Connection,
OptionalExtension,
};
use crate::persistence::{
error::{
PersistenceError,
Result,
},
types::*,
};
/// Default SQLite page size in bytes (4KB)
const DEFAULT_PAGE_SIZE: i64 = 4096;
/// Cache size for SQLite in KB (negative value = KB instead of pages)
const CACHE_SIZE_KB: i64 = -20000; // 20MB
/// Seconds since the Unix epoch, right now
///
/// Single source of truth for the timestamps written into every table.
#[inline]
fn current_timestamp() -> i64 {
    let now = Utc::now();
    now.timestamp()
}
/// Initialize SQLite connection with WAL mode and optimizations
///
/// Opens (or creates) the database at `path`, applies the persistence
/// PRAGMA profile, then brings the schema up to date by running migrations.
///
/// # Returns
/// - `Ok(Connection)`: Configured, migrated connection ready for use
/// - `Err`: If opening, configuring, or migrating fails
pub fn initialize_persistence_db<P: AsRef<Path>>(path: P) -> Result<Connection> {
    let mut conn = Connection::open(path)?;
    configure_sqlite_for_persistence(&conn)?;
    // Run migrations to ensure schema is up to date
    crate::persistence::run_migrations(&mut conn)?;
    Ok(conn)
}
/// Configure SQLite with WAL mode and battery-friendly settings
///
/// Applies the PRAGMA profile used by the persistence layer. Safe to call
/// on both fresh and existing databases.
pub fn configure_sqlite_for_persistence(conn: &Connection) -> Result<()> {
    // WAL journaling: better concurrency and fewer fsyncs than rollback mode.
    conn.execute_batch("PRAGMA journal_mode = WAL;")?;
    // Disable auto-checkpointing; checkpoints are driven explicitly.
    conn.execute_batch("PRAGMA wal_autocheckpoint = 0;")?;
    // NORMAL: fsync the WAL on commit only - durability/performance balance.
    conn.execute_batch("PRAGMA synchronous = NORMAL;")?;
    // A larger page size helps sequential writes on mobile flash, but it can
    // only take effect on a fresh database, so leave existing files alone.
    let current_page_size: i64 = conn.query_row("PRAGMA page_size", [], |row| row.get(0))?;
    if current_page_size == DEFAULT_PAGE_SIZE {
        // Best effort: a populated database silently keeps its page size.
        let _ = conn.execute_batch("PRAGMA page_size = 8192;");
    }
    // Negative cache_size means "this many KB" rather than pages.
    conn.execute_batch(&format!("PRAGMA cache_size = {};", CACHE_SIZE_KB))?;
    // Temp tables need no durability; keep them in RAM.
    conn.execute_batch("PRAGMA temp_store = MEMORY;")?;
    Ok(())
}
/// Create the database schema for persistence
///
/// Creates all core tables and indexes if they do not already exist, and
/// seeds the singleton `checkpoint_state` row.
///
/// NOTE(review): `vector_clock` here is keyed by `node_id` only, while the
/// session vector-clock functions later in this file read/write a
/// `session_id` column - presumably a migration reshapes this table; verify
/// against `run_migrations` before relying on this schema alone.
pub fn create_persistence_schema(conn: &Connection) -> Result<()> {
    // Entities table - stores entity metadata (UUID stored as 16-byte BLOB)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS entities (
            id BLOB PRIMARY KEY,
            entity_type TEXT NOT NULL,
            created_at INTEGER NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;
    // Components table - stores serialized component data, one row per
    // (entity, component type); rows vanish with their entity via CASCADE
    conn.execute(
        "CREATE TABLE IF NOT EXISTS components (
            entity_id BLOB NOT NULL,
            component_type TEXT NOT NULL,
            data BLOB NOT NULL,
            updated_at INTEGER NOT NULL,
            PRIMARY KEY (entity_id, component_type),
            FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
        )",
        [],
    )?;
    // Index for querying components by entity
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_components_entity
        ON components(entity_id)",
        [],
    )?;
    // Operation log - for CRDT sync protocol; (node_id, sequence_number)
    // uniqueness makes replayed operations idempotent
    conn.execute(
        "CREATE TABLE IF NOT EXISTS operation_log (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            node_id TEXT NOT NULL,
            sequence_number INTEGER NOT NULL,
            operation BLOB NOT NULL,
            timestamp INTEGER NOT NULL,
            UNIQUE(node_id, sequence_number)
        )",
        [],
    )?;
    // Index for efficient operation log queries
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_oplog_node_seq
        ON operation_log(node_id, sequence_number)",
        [],
    )?;
    // Vector clock table - for causality tracking
    conn.execute(
        "CREATE TABLE IF NOT EXISTS vector_clock (
            node_id TEXT PRIMARY KEY,
            counter INTEGER NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;
    // Session state table - for crash detection (see check_clean_shutdown)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS session_state (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;
    // WAL checkpoint tracking (singleton row at rowid = 1)
    conn.execute(
        "CREATE TABLE IF NOT EXISTS checkpoint_state (
            last_checkpoint INTEGER NOT NULL,
            wal_size_bytes INTEGER NOT NULL
        )",
        [],
    )?;
    // Initialize checkpoint state if not exists
    conn.execute(
        "INSERT OR IGNORE INTO checkpoint_state (rowid, last_checkpoint, wal_size_bytes)
        VALUES (1, ?, 0)",
        [current_timestamp()],
    )?;
    Ok(())
}
/// Flush a batch of operations to SQLite in a single transaction
///
/// All operations commit atomically: either every op lands or none do.
/// Upserts use `INSERT OR REPLACE`, so replaying the same batch is safe.
///
/// # Parameters
/// - `ops`: Buffered operations to apply, in order
/// - `conn`: Mutable connection (a transaction is opened on it)
///
/// # Returns
/// - `Ok(count)`: Number of operations applied (0 for an empty batch)
/// - `Err`: If any statement or the commit fails; nothing is applied
pub fn flush_to_sqlite(ops: &[PersistenceOp], conn: &mut Connection) -> Result<usize> {
    // Fast path: avoid opening a transaction for an empty batch.
    if ops.is_empty() {
        return Ok(0);
    }
    let tx = conn.transaction()?;
    let mut count = 0;
    for op in ops {
        match op {
            | PersistenceOp::UpsertEntity { id, data } => {
                tx.execute(
                    "INSERT OR REPLACE INTO entities (id, entity_type, created_at, updated_at)
                    VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        id.as_bytes(),
                        data.entity_type,
                        data.created_at.timestamp(),
                        data.updated_at.timestamp(),
                    ],
                )?;
                count += 1;
            },
            | PersistenceOp::UpsertComponent {
                entity_id,
                component_type,
                data,
            } => {
                tx.execute(
                    "INSERT OR REPLACE INTO components (entity_id, component_type, data, updated_at)
                    VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        entity_id.as_bytes(),
                        component_type,
                        data,
                        current_timestamp(),
                    ],
                )?;
                count += 1;
            },
            | PersistenceOp::LogOperation {
                node_id,
                sequence,
                operation,
            } => {
                tx.execute(
                    "INSERT OR REPLACE INTO operation_log (node_id, sequence_number, operation, timestamp)
                    VALUES (?1, ?2, ?3, ?4)",
                    rusqlite::params![
                        &node_id.to_string(), // Convert UUID to string for SQLite TEXT column
                        sequence,
                        operation,
                        current_timestamp(),
                    ],
                )?;
                count += 1;
            },
            | PersistenceOp::UpdateVectorClock { node_id, counter } => {
                tx.execute(
                    "INSERT OR REPLACE INTO vector_clock (node_id, counter, updated_at)
                    VALUES (?1, ?2, ?3)",
                    rusqlite::params![&node_id.to_string(), counter, current_timestamp()], // Convert UUID to string
                )?;
                count += 1;
            },
            | PersistenceOp::DeleteEntity { id } => {
                // Component rows are removed by the schema's ON DELETE CASCADE.
                tx.execute(
                    "DELETE FROM entities WHERE id = ?1",
                    rusqlite::params![id.as_bytes()],
                )?;
                count += 1;
            },
            | PersistenceOp::DeleteComponent {
                entity_id,
                component_type,
            } => {
                tx.execute(
                    "DELETE FROM components WHERE entity_id = ?1 AND component_type = ?2",
                    rusqlite::params![entity_id.as_bytes(), component_type],
                )?;
                count += 1;
            },
        }
    }
    tx.commit()?;
    Ok(count)
}
/// Manually checkpoint the WAL, folding logged frames into the main file
///
/// Checkpointing copies committed frames from the write-ahead log back into
/// the database file. This bounds WAL growth (saving disk space), keeps
/// recovery fast, and maintains database integrity.
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection
/// - `mode`: Blocking behaviour of the checkpoint (see [`CheckpointMode`])
///
/// # Returns
/// - `Ok(CheckpointInfo)`: Statistics reported by SQLite for this checkpoint
/// - `Err`: If the PRAGMA fails or the bookkeeping update fails
///
/// # Examples
/// ```no_run
/// # use rusqlite::Connection;
/// # use libmarathon::persistence::*;
/// # fn example() -> anyhow::Result<()> {
/// let mut conn = Connection::open("app.db")?;
/// let info = checkpoint_wal(&mut conn, CheckpointMode::Passive)?;
/// if info.busy {
///     // Some pages couldn't be checkpointed due to active readers
/// }
/// # Ok(())
/// # }
/// ```
pub fn checkpoint_wal(conn: &mut Connection, mode: CheckpointMode) -> Result<CheckpointInfo> {
    let mode_str = match mode {
        | CheckpointMode::Passive => "PASSIVE",
        | CheckpointMode::Full => "FULL",
        | CheckpointMode::Restart => "RESTART",
        | CheckpointMode::Truncate => "TRUNCATE",
    };
    // PRAGMA wal_checkpoint reports a (busy, log, checkpointed) triple.
    let sql = format!("PRAGMA wal_checkpoint({})", mode_str);
    let (busy, log_pages, checkpointed_pages) = conn.query_row(&sql, [], |row| {
        Ok((
            row.get::<_, i32>(0)?,
            row.get::<_, i32>(1)?,
            row.get::<_, i32>(2)?,
        ))
    })?;
    // Remember when the last checkpoint ran, for scheduling decisions.
    conn.execute(
        "UPDATE checkpoint_state SET last_checkpoint = ?1 WHERE rowid = 1",
        [current_timestamp()],
    )?;
    Ok(CheckpointInfo {
        busy: busy != 0,
        log_pages,
        checkpointed_pages,
    })
}
/// Get the size of the WAL file in bytes
///
/// Reads the WAL file's size straight from the filesystem, without
/// triggering a checkpoint. Large WAL files consume disk space and slow
/// recovery, so callers use this to decide when to checkpoint.
///
/// # Parameters
/// - `conn`: Reference to the SQLite connection
///
/// # Returns
/// - `Ok(i64)`: WAL file size in bytes; 0 when no WAL file exists yet or
///   the database is in-memory
/// - `Err`: If the database path query fails
pub fn get_wal_size(conn: &Connection) -> Result<i64> {
    // Column 2 of "PRAGMA database_list" is the main database's file path.
    let db_path: Option<String> = conn
        .query_row("PRAGMA database_list", [], |row| row.get::<_, String>(2))
        .optional()?;
    match db_path {
        // In-memory database: there is no WAL file on disk.
        | None => Ok(0),
        | Some(path) => {
            // SQLite names the WAL alongside the database with a -wal suffix.
            let wal_path = format!("{}-wal", path);
            // A missing WAL file simply means nothing has been logged yet.
            let size = std::fs::metadata(&wal_path)
                .map(|meta| meta.len() as i64)
                .unwrap_or(0);
            Ok(size)
        },
    }
}
/// Checkpoint mode for WAL
///
/// Mirrors the modes accepted by SQLite's `PRAGMA wal_checkpoint`, ordered
/// from least to most aggressive.
#[derive(Debug, Clone, Copy)]
pub enum CheckpointMode {
    /// Passive checkpoint - doesn't block readers/writers
    Passive,
    /// Full checkpoint - waits for writers to finish
    Full,
    /// Restart checkpoint - like Full, but restarts WAL file
    Restart,
    /// Truncate checkpoint - like Restart, but truncates WAL file to 0 bytes
    Truncate,
}
/// Information about a checkpoint operation
///
/// Fields mirror the triple returned by `PRAGMA wal_checkpoint`.
#[derive(Debug)]
pub struct CheckpointInfo {
    /// True if some frames could not be checkpointed (e.g. active readers)
    pub busy: bool,
    /// Total number of frames currently in the WAL
    pub log_pages: i32,
    /// Number of frames successfully moved into the main database file
    pub checkpointed_pages: i32,
}
/// Set a session state value in the database
///
/// Session state is used to track application lifecycle events and detect
/// crashes. Values persist across restarts, enabling crash detection and
/// recovery. `INSERT OR REPLACE` means writing an existing key overwrites
/// its value and timestamp.
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection
/// - `key`: State key (e.g., "clean_shutdown", "session_id")
/// - `value`: State value to store
///
/// # Returns
/// - `Ok(())`: State was successfully saved
/// - `Err`: If the database write fails
pub fn set_session_state(conn: &mut Connection, key: &str, value: &str) -> Result<()> {
    conn.execute(
        "INSERT OR REPLACE INTO session_state (key, value, updated_at)
        VALUES (?1, ?2, ?3)",
        rusqlite::params![key, value, current_timestamp()],
    )?;
    Ok(())
}
/// Get a session state value from the database
///
/// Retrieves persistent state information stored across application sessions.
///
/// # Parameters
/// - `conn`: Reference to the SQLite connection
/// - `key`: State key to retrieve
///
/// # Returns
/// - `Ok(Some(value))`: State exists and was retrieved
/// - `Ok(None)`: State key doesn't exist
/// - `Err`: If the database query fails
pub fn get_session_state(conn: &Connection, key: &str) -> Result<Option<String>> {
    conn.query_row(
        "SELECT value FROM session_state WHERE key = ?1",
        rusqlite::params![key],
        |row| row.get(0),
    )
    .optional()
    // Use the From impl directly instead of a redundant closure
    // (clippy::redundant_closure); matches the sibling session loaders.
    .map_err(PersistenceError::from)
}
/// Check if the previous session had a clean shutdown
///
/// Core of crash detection: at startup this reports whether the previous
/// session ended cleanly. A `false` result means either a crash occurred or
/// this is the very first run, and recovery procedures may be needed.
///
/// **Side effect**: the clean_shutdown flag is reset to "false" for the
/// current session. Call [`mark_clean_shutdown`] during normal shutdown to
/// set it back to "true".
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection (mutates session state)
///
/// # Returns
/// - `Ok(true)`: Previous session shut down cleanly
/// - `Ok(false)`: Previous session crashed or this is first run
/// - `Err`: If database operations fail
pub fn check_clean_shutdown(conn: &mut Connection) -> Result<bool> {
    // Missing key (first run) and any value other than "true" both count
    // as an unclean shutdown.
    let was_clean = matches!(
        get_session_state(conn, "clean_shutdown")?.as_deref(),
        Some("true")
    );
    // Pessimistically assume a crash until mark_clean_shutdown() runs.
    set_session_state(conn, "clean_shutdown", "false")?;
    Ok(was_clean)
}
/// Mark the current session as cleanly shut down
///
/// Call this during normal application shutdown to indicate clean termination.
/// The next startup will detect this flag via [`check_clean_shutdown`] and know
/// no crash occurred.
///
/// # Parameters
/// - `conn`: Mutable reference to the SQLite connection
///
/// # Returns
/// - `Ok(())`: Clean shutdown flag was set
/// - `Err`: If the database write fails
pub fn mark_clean_shutdown(conn: &mut Connection) -> Result<()> {
    set_session_state(conn, "clean_shutdown", "true")
}
//
// ============================================================================
// Session Management Operations
// ============================================================================
//
/// Save session metadata to database
///
/// Upserts the full session row keyed by the session's UUID; both the raw
/// UUID bytes and the human-readable code are stored.
///
/// NOTE(review): the `sessions` table is not created by
/// `create_persistence_schema` in this file - presumably it comes from a
/// migration; verify before relying on a freshly created database.
pub fn save_session(conn: &mut Connection, session: &crate::networking::Session) -> Result<()> {
    conn.execute(
        "INSERT OR REPLACE INTO sessions (id, code, name, created_at, last_active, entity_count, state, secret)
        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
        rusqlite::params![
            session.id.as_uuid().as_bytes(),
            session.id.to_code(),
            session.name,
            session.created_at,
            session.last_active,
            session.entity_count as i64,
            session.state.to_string(),
            session.secret,
        ],
    )?;
    Ok(())
}
/// Load session by ID
///
/// Returns `Ok(None)` when no session with the given ID exists. Note two
/// lenient conversions during row mapping: an unrecognized `state` string
/// falls back to `SessionState::Created`, while a code that fails to parse
/// surfaces as a database error (`InvalidQuery`).
pub fn load_session(
    conn: &Connection,
    session_id: crate::networking::SessionId,
) -> Result<Option<crate::networking::Session>> {
    conn.query_row(
        "SELECT code, name, created_at, last_active, entity_count, state, secret
        FROM sessions WHERE id = ?1",
        [session_id.as_uuid().as_bytes()],
        |row| {
            let code: String = row.get(0)?;
            let state_str: String = row.get(5)?;
            // Unknown state strings degrade gracefully to Created.
            let state = crate::networking::SessionState::from_str(&state_str)
                .unwrap_or(crate::networking::SessionState::Created);
            // Reconstruct SessionId from the stored code
            let id = crate::networking::SessionId::from_code(&code)
                .map_err(|_| rusqlite::Error::InvalidQuery)?;
            Ok(crate::networking::Session {
                id,
                name: row.get(1)?,
                created_at: row.get(2)?,
                last_active: row.get(3)?,
                entity_count: row.get::<_, i64>(4)? as usize,
                state,
                secret: row.get(6)?,
            })
        },
    )
    .optional()
    .map_err(PersistenceError::from)
}
/// Get the most recently active session
///
/// Orders by `last_active` descending and returns the top row, or
/// `Ok(None)` when no sessions exist.
///
/// NOTE(review): the row-mapping closure duplicates [`load_session`];
/// consider extracting a shared helper if either mapping changes.
pub fn get_last_active_session(conn: &Connection) -> Result<Option<crate::networking::Session>> {
    conn.query_row(
        "SELECT code, name, created_at, last_active, entity_count, state, secret
        FROM sessions ORDER BY last_active DESC LIMIT 1",
        [],
        |row| {
            let code: String = row.get(0)?;
            let state_str: String = row.get(5)?;
            // Unknown state strings degrade gracefully to Created.
            let state = crate::networking::SessionState::from_str(&state_str)
                .unwrap_or(crate::networking::SessionState::Created);
            // Reconstruct SessionId from the stored code
            let id = crate::networking::SessionId::from_code(&code)
                .map_err(|_| rusqlite::Error::InvalidQuery)?;
            Ok(crate::networking::Session {
                id,
                name: row.get(1)?,
                created_at: row.get(2)?,
                last_active: row.get(3)?,
                entity_count: row.get::<_, i64>(4)? as usize,
                state,
                secret: row.get(6)?,
            })
        },
    )
    .optional()
    .map_err(PersistenceError::from)
}
/// Save session vector clock to database
///
/// Replaces the stored clock wholesale: inside one transaction, all prior
/// entries for the session are deleted, then the current counters are
/// inserted. Atomicity guarantees a reader never sees a partial clock.
///
/// NOTE(review): writes a `session_id` column that the `vector_clock`
/// schema in `create_persistence_schema` does not declare - presumably a
/// migration adds it; confirm against `run_migrations`.
pub fn save_session_vector_clock(
    conn: &mut Connection,
    session_id: crate::networking::SessionId,
    clock: &crate::networking::VectorClock,
) -> Result<()> {
    let tx = conn.transaction()?;
    // Delete old clock entries for this session
    tx.execute(
        "DELETE FROM vector_clock WHERE session_id = ?1",
        [session_id.as_uuid().as_bytes()],
    )?;
    // Insert current clock state
    for (node_id, &counter) in &clock.clocks {
        tx.execute(
            "INSERT INTO vector_clock (session_id, node_id, counter, updated_at)
            VALUES (?1, ?2, ?3, ?4)",
            rusqlite::params![
                session_id.as_uuid().as_bytes(),
                node_id.to_string(),
                counter as i64,
                current_timestamp(),
            ],
        )?;
    }
    tx.commit()?;
    Ok(())
}
/// Load session vector clock from database
///
/// Rebuilds a [`crate::networking::VectorClock`] from the stored rows for
/// one session. Rows whose `node_id` is not a valid UUID are silently
/// skipped rather than failing the whole load.
pub fn load_session_vector_clock(
    conn: &Connection,
    session_id: crate::networking::SessionId,
) -> Result<crate::networking::VectorClock> {
    let mut stmt =
        conn.prepare("SELECT node_id, counter FROM vector_clock WHERE session_id = ?1")?;
    let mut clock = crate::networking::VectorClock::new();
    let rows = stmt.query_map([session_id.as_uuid().as_bytes()], |row| {
        let node_id_str: String = row.get(0)?;
        let counter: i64 = row.get(1)?;
        Ok((node_id_str, counter))
    })?;
    for row in rows {
        let (node_id_str, counter) = row?;
        // Tolerate malformed node ids: drop the row instead of erroring.
        if let Ok(node_id) = uuid::Uuid::parse_str(&node_id_str) {
            clock.clocks.insert(node_id, counter as u64);
        }
    }
    Ok(clock)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Schema creation must leave all core tables present.
    #[test]
    fn test_database_initialization() -> Result<()> {
        let conn = Connection::open_in_memory()?;
        configure_sqlite_for_persistence(&conn)?;
        create_persistence_schema(&conn)?;
        // Verify tables exist by querying sqlite_master directly.
        let tables: Vec<String> = conn
            .prepare("SELECT name FROM sqlite_master WHERE type='table'")?
            .query_map([], |row| row.get(0))?
            .collect::<std::result::Result<Vec<_>, _>>()?;
        assert!(tables.contains(&"entities".to_string()));
        assert!(tables.contains(&"components".to_string()));
        assert!(tables.contains(&"operation_log".to_string()));
        assert!(tables.contains(&"vector_clock".to_string()));
        Ok(())
    }

    /// A batch of upserts must be applied and counted correctly.
    #[test]
    fn test_flush_operations() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;
        let entity_id = uuid::Uuid::new_v4();
        let ops = vec![
            PersistenceOp::UpsertEntity {
                id: entity_id,
                data: EntityData {
                    id: entity_id,
                    created_at: Utc::now(),
                    updated_at: Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            },
            PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Transform".to_string(),
                data: vec![1, 2, 3, 4],
            },
        ];
        let count = flush_to_sqlite(&ops, &mut conn)?;
        assert_eq!(count, 2);
        // Verify entity exists
        let exists: bool = conn.query_row(
            "SELECT COUNT(*) > 0 FROM entities WHERE id = ?1",
            rusqlite::params![entity_id.as_bytes()],
            |row| row.get(0),
        )?;
        assert!(exists);
        Ok(())
    }

    /// Session state values round-trip through set/get.
    #[test]
    fn test_session_state() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;
        set_session_state(&mut conn, "test_key", "test_value")?;
        let value = get_session_state(&conn, "test_key")?;
        assert_eq!(value, Some("test_value".to_string()));
        Ok(())
    }

    /// Clean-shutdown flag lifecycle: first run looks like a crash, a
    /// marked shutdown reads clean once, and the check resets the flag.
    #[test]
    fn test_crash_recovery() -> Result<()> {
        let mut conn = Connection::open_in_memory()?;
        create_persistence_schema(&conn)?;
        // Simulate first startup - should report as crash (no clean shutdown marker)
        let clean = check_clean_shutdown(&mut conn)?;
        assert!(!clean, "First startup should be detected as crash");
        // Mark clean shutdown
        mark_clean_shutdown(&mut conn)?;
        // Next startup should report clean shutdown
        let clean = check_clean_shutdown(&mut conn)?;
        assert!(clean, "Should detect clean shutdown");
        // After checking clean shutdown, flag should be reset to false
        // So if we check again without marking, it should report as crash
        let value = get_session_state(&conn, "clean_shutdown")?;
        assert_eq!(
            value,
            Some("false".to_string()),
            "Flag should be reset after check"
        );
        Ok(())
    }
}

View File

@@ -0,0 +1,124 @@
//! Error types for the persistence layer
use std::fmt;
/// Result type for persistence operations
///
/// Convenience alias so persistence APIs can return `Result<T>` without
/// spelling out [`PersistenceError`] at every signature.
pub type Result<T> = std::result::Result<T, PersistenceError>;
/// Errors that can occur in the persistence layer
///
/// Wrapper variants (`Database`, `Serialization`, `Io`) preserve the
/// underlying error as a `source()`; the remaining variants carry a
/// descriptive payload only.
#[derive(Debug)]
pub enum PersistenceError {
    /// Database operation failed
    Database(rusqlite::Error),
    /// Serialization failed
    Serialization(bincode::Error),
    /// Deserialization failed
    Deserialization(String),
    /// Configuration error (also produced by TOML parse/serialize failures)
    Config(String),
    /// I/O error (file operations, WAL checks, etc.)
    Io(std::io::Error),
    /// Type not found in registry
    TypeNotRegistered(String),
    /// Entity or component not found
    NotFound(String),
    /// Circuit breaker is open, operation blocked
    CircuitBreakerOpen {
        consecutive_failures: u32,
        retry_after_secs: u64,
    },
    /// Component data exceeds maximum size
    ComponentTooLarge {
        component_type: String,
        size_bytes: usize,
        max_bytes: usize,
    },
    /// Other error
    Other(String),
}
impl fmt::Display for PersistenceError {
    /// Human-readable description; wrapped errors include their inner
    /// message inline.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            | Self::Database(err) => write!(f, "Database error: {}", err),
            | Self::Serialization(err) => write!(f, "Serialization error: {}", err),
            | Self::Deserialization(msg) => write!(f, "Deserialization error: {}", msg),
            | Self::Config(msg) => write!(f, "Configuration error: {}", msg),
            | Self::Io(err) => write!(f, "I/O error: {}", err),
            | Self::TypeNotRegistered(type_name) => {
                write!(f, "Type not registered in type registry: {}", type_name)
            },
            | Self::NotFound(msg) => write!(f, "Not found: {}", msg),
            | Self::CircuitBreakerOpen {
                consecutive_failures,
                retry_after_secs,
            } => write!(
                f,
                "Circuit breaker open after {} consecutive failures, retry after {} seconds",
                consecutive_failures, retry_after_secs
            ),
            | Self::ComponentTooLarge {
                component_type,
                size_bytes,
                max_bytes,
            } => write!(
                f,
                "Component '{}' size ({} bytes) exceeds maximum ({} bytes). \
                This may indicate unbounded data growth or serialization issues.",
                component_type, size_bytes, max_bytes
            ),
            | Self::Other(msg) => write!(f, "{}", msg),
        }
    }
}
impl std::error::Error for PersistenceError {
    /// Expose the wrapped error for variants that carry one; string-based
    /// variants have no further source.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            | Self::Database(err) => Some(err),
            | Self::Serialization(err) => Some(err),
            | Self::Io(err) => Some(err),
            | _ => None,
        }
    }
}
// Conversions from common error types so `?` propagates them directly.
// Database errors keep the original value for source() chaining.
impl From<rusqlite::Error> for PersistenceError {
    fn from(err: rusqlite::Error) -> Self {
        Self::Database(err)
    }
}
// bincode errors likewise keep the original value.
impl From<bincode::Error> for PersistenceError {
    fn from(err: bincode::Error) -> Self {
        Self::Serialization(err)
    }
}
// I/O errors keep the original value.
impl From<std::io::Error> for PersistenceError {
    fn from(err: std::io::Error) -> Self {
        Self::Io(err)
    }
}
// TOML errors are flattened to their message and filed under Config.
impl From<toml::de::Error> for PersistenceError {
    fn from(err: toml::de::Error) -> Self {
        Self::Config(err.to_string())
    }
}
impl From<toml::ser::Error> for PersistenceError {
    fn from(err: toml::ser::Error) -> Self {
        Self::Config(err.to_string())
    }
}

View File

@@ -0,0 +1,218 @@
//! Health monitoring and error recovery for persistence layer
use std::time::{
Duration,
Instant,
};
use bevy::prelude::*;
/// Base delay for exponential backoff in milliseconds
const BASE_RETRY_DELAY_MS: u64 = 1000; // 1 second
/// Maximum retry delay in milliseconds (caps exponential backoff)
const MAX_RETRY_DELAY_MS: u64 = 30000; // 30 seconds
/// Maximum exponent for exponential backoff calculation
const MAX_BACKOFF_EXPONENT: u32 = 5;
/// Resource to track persistence health and failures
///
/// Implements a circuit breaker over flush operations: consecutive
/// failures open the breaker, a cooldown lets a probe through, and a
/// success closes it again. Checkpoint failures are tracked but do not
/// affect the breaker.
#[derive(Resource, Debug)]
pub struct PersistenceHealth {
    /// Number of consecutive flush failures
    pub consecutive_flush_failures: u32,
    /// Number of consecutive checkpoint failures
    pub consecutive_checkpoint_failures: u32,
    /// Time of last successful flush
    pub last_successful_flush: Option<Instant>,
    /// Time of last successful checkpoint
    pub last_successful_checkpoint: Option<Instant>,
    /// Whether the persistence layer is in circuit breaker mode
    pub circuit_breaker_open: bool,
    /// When the circuit breaker was opened
    pub circuit_breaker_opened_at: Option<Instant>,
    /// Total number of failures across the session
    pub total_failures: u64,
}
impl Default for PersistenceHealth {
    /// A fresh session: no failures recorded, breaker closed, and no
    /// flush/checkpoint timestamps yet.
    fn default() -> Self {
        Self {
            circuit_breaker_open: false,
            circuit_breaker_opened_at: None,
            consecutive_flush_failures: 0,
            consecutive_checkpoint_failures: 0,
            last_successful_flush: None,
            last_successful_checkpoint: None,
            total_failures: 0,
        }
    }
}
impl PersistenceHealth {
    /// Cooldown before a half-open probe is allowed through the breaker.
    pub const CIRCUIT_BREAKER_COOLDOWN: Duration = Duration::from_secs(60);
    /// Consecutive flush failures that trip the breaker open.
    pub const CIRCUIT_BREAKER_THRESHOLD: u32 = 5;

    /// Note a successful flush: clears the failure streak and, if the
    /// breaker was open, closes it.
    pub fn record_flush_success(&mut self) {
        self.consecutive_flush_failures = 0;
        self.last_successful_flush = Some(Instant::now());
        if self.circuit_breaker_open {
            info!("Persistence recovered - closing circuit breaker");
            self.circuit_breaker_open = false;
            self.circuit_breaker_opened_at = None;
        }
    }

    /// Note a failed flush; trips the breaker once the streak reaches
    /// [`Self::CIRCUIT_BREAKER_THRESHOLD`].
    pub fn record_flush_failure(&mut self) {
        self.consecutive_flush_failures += 1;
        self.total_failures += 1;
        let tripped = self.consecutive_flush_failures >= Self::CIRCUIT_BREAKER_THRESHOLD;
        if tripped && !self.circuit_breaker_open {
            warn!(
                "Opening circuit breaker after {} consecutive flush failures",
                self.consecutive_flush_failures
            );
            self.circuit_breaker_open = true;
            self.circuit_breaker_opened_at = Some(Instant::now());
        }
    }

    /// Note a successful checkpoint (does not affect the breaker).
    pub fn record_checkpoint_success(&mut self) {
        self.consecutive_checkpoint_failures = 0;
        self.last_successful_checkpoint = Some(Instant::now());
    }

    /// Note a failed checkpoint (does not affect the breaker).
    pub fn record_checkpoint_failure(&mut self) {
        self.consecutive_checkpoint_failures += 1;
        self.total_failures += 1;
    }

    /// Whether an operation should be attempted right now.
    ///
    /// Takes `&mut self` because an expired cooldown transitions the
    /// breaker to half-open (reset), letting one probe through. Without
    /// that reset, a single post-cooldown failure would leave the breaker
    /// permanently open.
    pub fn should_attempt_operation(&mut self) -> bool {
        if !self.circuit_breaker_open {
            return true;
        }
        let cooled_down = self
            .circuit_breaker_opened_at
            .map_or(false, |opened| opened.elapsed() >= Self::CIRCUIT_BREAKER_COOLDOWN);
        if cooled_down {
            info!(
                "Circuit breaker cooldown elapsed - entering half-open state (testing recovery)"
            );
            self.circuit_breaker_open = false;
            self.circuit_breaker_opened_at = None;
            // The failure streak is intentionally retained so the probe's
            // outcome is judged against it.
            return true;
        }
        false
    }

    /// Exponential backoff delay: 1s, 2s, 4s, ... capped at 30s.
    pub fn get_retry_delay(&self) -> Duration {
        let exponent = self.consecutive_flush_failures.min(MAX_BACKOFF_EXPONENT);
        let delay_ms = BASE_RETRY_DELAY_MS * 2u64.pow(exponent);
        Duration::from_millis(delay_ms.min(MAX_RETRY_DELAY_MS))
    }
}
/// Message emitted when persistence fails
#[derive(Message, Debug, Clone)]
pub struct PersistenceFailureEvent {
    /// Human-readable description of the underlying error.
    pub error: String,
    /// Number of consecutive flush failures observed so far.
    pub consecutive_failures: u32,
    /// Whether the failure streak has tripped the circuit breaker.
    pub circuit_breaker_open: bool,
}
/// Message emitted when persistence recovers from failures
#[derive(Message, Debug, Clone)]
pub struct PersistenceRecoveryEvent {
    /// Consecutive failure count recorded before the recovery.
    pub previous_failures: u32,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Drive the breaker to the open state by recording threshold failures.
    fn trip_breaker(health: &mut PersistenceHealth) {
        for _ in 0..PersistenceHealth::CIRCUIT_BREAKER_THRESHOLD {
            health.record_flush_failure();
        }
    }

    #[test]
    fn test_circuit_breaker() {
        let mut health = PersistenceHealth::default();
        // A fresh tracker allows operations; the breaker starts closed.
        assert!(health.should_attempt_operation());
        assert!(!health.circuit_breaker_open);
        trip_breaker(&mut health);
        // The breaker is now open and keeps blocking operations.
        assert!(health.circuit_breaker_open);
        assert!(!health.should_attempt_operation());
        assert!(!health.should_attempt_operation());
    }

    #[test]
    fn test_recovery() {
        let mut health = PersistenceHealth::default();
        trip_breaker(&mut health);
        assert!(health.circuit_breaker_open);
        // One successful flush closes the breaker and clears the streak.
        health.record_flush_success();
        assert!(!health.circuit_breaker_open);
        assert_eq!(health.consecutive_flush_failures, 0);
    }

    #[test]
    fn test_exponential_backoff() {
        let mut health = PersistenceHealth::default();
        // Delay doubles with each consecutive failure: 1s, 2s, 4s, ...
        for expected_secs in [1u64, 2, 4] {
            assert_eq!(health.get_retry_delay(), Duration::from_secs(expected_secs));
            health.record_flush_failure();
        }
        // ...and saturates at the 30s ceiling.
        for _ in 0..10 {
            health.record_flush_failure();
        }
        assert_eq!(health.get_retry_delay(), Duration::from_secs(30));
    }
}

View File

@@ -0,0 +1,165 @@
//! iOS lifecycle event handling for persistence
//!
//! This module provides event types and handlers for iOS application lifecycle
//! events that require immediate persistence (e.g., background suspension).
//!
//! # iOS Integration
//!
//! To integrate with iOS, wire up these handlers in your app delegate:
//!
//! ```swift
//! // In your iOS app delegate:
//! func applicationWillResignActive(_ application: UIApplication) {
//! // Send AppLifecycleEvent::WillResignActive to Bevy
//! }
//!
//! func applicationDidEnterBackground(_ application: UIApplication) {
//! // Send AppLifecycleEvent::DidEnterBackground to Bevy
//! }
//! ```
use bevy::prelude::*;
use crate::persistence::*;
/// Application lifecycle events that require persistence handling
///
/// These events are critical moments where data must be flushed immediately
/// to avoid data loss. They are consumed by `lifecycle_event_system`.
#[derive(Debug, Clone, Message)]
pub enum AppLifecycleEvent {
    /// Application will resign active (iOS: `applicationWillResignActive`)
    ///
    /// Sent when the app is about to move from active to inactive state.
    /// Example: incoming phone call, user switches to another app.
    /// Triggers an immediate flush.
    WillResignActive,
    /// Application did enter background (iOS: `applicationDidEnterBackground`)
    ///
    /// Sent when the app has moved to the background. The app has approximately
    /// 5 seconds to complete critical tasks before suspension.
    /// Triggers an immediate flush followed by a WAL checkpoint.
    DidEnterBackground,
    /// Application will enter foreground (iOS:
    /// `applicationWillEnterForeground`)
    ///
    /// Sent when the app is about to enter the foreground (user returning to
    /// app). Logged only; no persistence work is performed.
    WillEnterForeground,
    /// Application did become active (iOS: `applicationDidBecomeActive`)
    ///
    /// Sent when the app has become active and is ready to receive user input.
    /// Logged only; no persistence work is performed.
    DidBecomeActive,
    /// Application will terminate (iOS: `applicationWillTerminate`)
    ///
    /// Sent when the app is about to terminate. Similar to shutdown but from
    /// OS. Triggers the full shutdown sequence.
    WillTerminate,
}
/// System to handle iOS lifecycle events and trigger immediate persistence
///
/// This system listens for lifecycle events and performs immediate flushes
/// when the app is backgrounding or terminating.
pub fn lifecycle_event_system(
mut events: MessageReader<AppLifecycleEvent>,
mut write_buffer: ResMut<WriteBufferResource>,
db: Res<PersistenceDb>,
mut metrics: ResMut<PersistenceMetrics>,
mut health: ResMut<PersistenceHealth>,
mut pending_tasks: ResMut<PendingFlushTasks>,
) {
for event in events.read() {
match event {
| AppLifecycleEvent::WillResignActive => {
// App is becoming inactive - perform immediate flush
info!("App will resign active - performing immediate flush");
if let Err(e) = force_flush(&mut write_buffer, &db, &mut metrics) {
error!("Failed to flush on resign active: {}", e);
health.record_flush_failure();
} else {
health.record_flush_success();
}
},
| AppLifecycleEvent::DidEnterBackground => {
// App entered background - perform immediate flush and checkpoint
info!("App entered background - performing immediate flush and checkpoint");
// Force immediate flush
if let Err(e) = force_flush(&mut write_buffer, &db, &mut metrics) {
error!("Failed to flush on background: {}", e);
health.record_flush_failure();
} else {
health.record_flush_success();
}
// Also checkpoint the WAL to ensure durability
let start = std::time::Instant::now();
match db.lock() {
| Ok(mut conn) => match checkpoint_wal(&mut conn, CheckpointMode::Passive) {
| Ok(_) => {
let duration = start.elapsed();
metrics.record_checkpoint(duration);
health.record_checkpoint_success();
info!("Background checkpoint completed successfully");
},
| Err(e) => {
error!("Failed to checkpoint on background: {}", e);
health.record_checkpoint_failure();
},
},
| Err(e) => {
error!("Failed to acquire database lock for checkpoint: {}", e);
health.record_checkpoint_failure();
},
}
},
| AppLifecycleEvent::WillTerminate => {
// App will terminate - perform shutdown sequence
warn!("App will terminate - performing shutdown sequence");
if let Err(e) = shutdown_system(
&mut write_buffer,
&db,
&mut metrics,
Some(&mut pending_tasks),
) {
error!("Failed to perform shutdown on terminate: {}", e);
} else {
info!("Clean shutdown completed on terminate");
}
},
| AppLifecycleEvent::WillEnterForeground => {
// App returning from background - no immediate action needed
info!("App will enter foreground");
},
| AppLifecycleEvent::DidBecomeActive => {
// App became active - no immediate action needed
info!("App did become active");
},
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_lifecycle_event_creation() {
        // Constructing a variant and pattern-testing it must agree.
        let event = AppLifecycleEvent::WillResignActive;
        assert!(matches!(event, AppLifecycleEvent::WillResignActive));
    }
}

View File

@@ -0,0 +1,211 @@
//! Metrics tracking for persistence layer
use std::time::Duration;
/// Metrics for monitoring persistence performance
#[derive(Debug, Clone, Default, bevy::prelude::Resource)]
pub struct PersistenceMetrics {
    // Write volume
    /// Total number of persistence operations written across all flushes.
    pub total_writes: u64,
    /// Total payload bytes written.
    pub bytes_written: u64,
    // Timing
    /// Number of flush operations performed.
    pub flush_count: u64,
    /// Cumulative wall-clock time spent flushing.
    pub total_flush_duration: Duration,
    /// Number of WAL checkpoints performed.
    pub checkpoint_count: u64,
    /// Cumulative wall-clock time spent checkpointing.
    pub total_checkpoint_duration: Duration,
    // WAL health
    /// Most recently observed WAL file size in bytes.
    pub wal_size_bytes: u64,
    /// Largest WAL size ever observed (high-water mark).
    pub max_wal_size_bytes: u64,
    // Recovery
    /// Number of startups that required crash recovery.
    pub crash_recovery_count: u64,
    /// Number of clean shutdowns recorded.
    pub clean_shutdown_count: u64,
    // Buffer stats
    /// Largest write-buffer size observed (high-water mark).
    pub max_buffer_size: usize,
    /// Total operations merged away by write coalescing.
    pub total_coalesced_ops: u64,
}
impl PersistenceMetrics {
    /// Average flush latency above which [`HealthWarning::SlowFlush`] fires.
    const SLOW_FLUSH_THRESHOLD: Duration = Duration::from_millis(50);
    /// WAL size in bytes above which [`HealthWarning::LargeWal`] fires (5MB).
    const LARGE_WAL_THRESHOLD: u64 = 5 * 1024 * 1024;
    /// Crash-recovery rate above which [`HealthWarning::HighCrashRate`] fires.
    const HIGH_CRASH_RATE_THRESHOLD: f64 = 0.1;

    /// Record a flush of `operations` ops that took `duration` and wrote
    /// `bytes_written` bytes.
    pub fn record_flush(&mut self, operations: usize, duration: Duration, bytes_written: u64) {
        self.flush_count += 1;
        self.total_writes += operations as u64;
        self.total_flush_duration += duration;
        self.bytes_written += bytes_written;
    }
    /// Record a checkpoint operation
    pub fn record_checkpoint(&mut self, duration: Duration) {
        self.checkpoint_count += 1;
        self.total_checkpoint_duration += duration;
    }
    /// Update the current WAL size, tracking the high-water mark.
    pub fn update_wal_size(&mut self, size: u64) {
        self.wal_size_bytes = size;
        self.max_wal_size_bytes = self.max_wal_size_bytes.max(size);
    }
    /// Record a crash recovery
    pub fn record_crash_recovery(&mut self) {
        self.crash_recovery_count += 1;
    }
    /// Record a clean shutdown
    pub fn record_clean_shutdown(&mut self) {
        self.clean_shutdown_count += 1;
    }
    /// Record buffer stats (size high-water mark and coalesced-op count).
    pub fn record_buffer_stats(&mut self, buffer_size: usize, coalesced: u64) {
        self.max_buffer_size = self.max_buffer_size.max(buffer_size);
        self.total_coalesced_ops += coalesced;
    }
    /// Integer average of `total` over `count` samples; zero when `count == 0`.
    ///
    /// Divides in nanoseconds instead of using `Duration / count as u32`: the
    /// previous `u64 -> u32` cast silently truncated large counts and would
    /// even panic with a division by zero if the count were an exact multiple
    /// of 2^32.
    fn average_duration(total: Duration, count: u64) -> Duration {
        if count == 0 {
            Duration::from_secs(0)
        } else {
            // Truncating back to u64 nanoseconds is safe for any realistic
            // average (< ~584 years).
            Duration::from_nanos((total.as_nanos() / u128::from(count)) as u64)
        }
    }
    /// Get average flush duration
    pub fn avg_flush_duration(&self) -> Duration {
        Self::average_duration(self.total_flush_duration, self.flush_count)
    }
    /// Get average checkpoint duration
    pub fn avg_checkpoint_duration(&self) -> Duration {
        Self::average_duration(self.total_checkpoint_duration, self.checkpoint_count)
    }
    /// Fraction of recorded startups that required crash recovery (0.0 when
    /// nothing has been recorded yet).
    pub fn crash_recovery_rate(&self) -> f64 {
        let total = self.crash_recovery_count + self.clean_shutdown_count;
        if total == 0 {
            0.0
        } else {
            self.crash_recovery_count as f64 / total as f64
        }
    }
    /// Check if metrics indicate performance issues
    pub fn check_health(&self) -> Vec<HealthWarning> {
        let mut warnings = Vec::new();
        // Check flush duration
        if self.avg_flush_duration() > Self::SLOW_FLUSH_THRESHOLD {
            warnings.push(HealthWarning::SlowFlush(self.avg_flush_duration()));
        }
        // Check WAL size
        if self.wal_size_bytes > Self::LARGE_WAL_THRESHOLD {
            warnings.push(HealthWarning::LargeWal(self.wal_size_bytes));
        }
        // Check crash rate
        if self.crash_recovery_rate() > Self::HIGH_CRASH_RATE_THRESHOLD {
            warnings.push(HealthWarning::HighCrashRate(self.crash_recovery_rate()));
        }
        warnings
    }
    /// Reset all metrics
    pub fn reset(&mut self) {
        *self = Self::default();
    }
}
/// Health warnings for persistence metrics
#[derive(Debug, Clone)]
pub enum HealthWarning {
    /// Flush operations are taking too long
    SlowFlush(Duration),
    /// WAL file is too large
    LargeWal(u64),
    /// High crash recovery rate
    HighCrashRate(f64),
}

impl std::fmt::Display for HealthWarning {
    /// Render a human-readable description including the threshold that was
    /// exceeded.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::SlowFlush(duration) => {
                write!(f, "Flush duration ({:?}) exceeds 50ms threshold", duration)
            },
            Self::LargeWal(size) => {
                write!(f, "WAL size ({} bytes) exceeds 5MB threshold", size)
            },
            Self::HighCrashRate(rate) => {
                write!(
                    f,
                    "Crash recovery rate ({:.1}%) exceeds 10% threshold",
                    rate * 100.0
                )
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_metrics_recording() {
        let mut m = PersistenceMetrics::default();
        // One flush of 10 ops / 1 KiB is reflected in all three counters.
        m.record_flush(10, Duration::from_millis(5), 1024);
        assert_eq!(m.flush_count, 1);
        assert_eq!(m.total_writes, 10);
        assert_eq!(m.bytes_written, 1024);
        // Checkpoints are counted independently of flushes.
        m.record_checkpoint(Duration::from_millis(10));
        assert_eq!(m.checkpoint_count, 1);
    }

    #[test]
    fn test_average_calculations() {
        let mut m = PersistenceMetrics::default();
        m.record_flush(10, Duration::from_millis(10), 1024);
        m.record_flush(20, Duration::from_millis(20), 2048);
        // (10ms + 20ms) / 2 flushes == 15ms.
        assert_eq!(m.avg_flush_duration(), Duration::from_millis(15));
    }

    #[test]
    fn test_health_warnings() {
        let mut m = PersistenceMetrics::default();
        // A single 100ms flush pushes the average past the 50ms threshold.
        m.record_flush(10, Duration::from_millis(100), 1024);
        let warnings = m.check_health();
        assert_eq!(warnings.len(), 1);
        assert!(matches!(warnings[0], HealthWarning::SlowFlush(_)));
    }

    #[test]
    fn test_crash_recovery_rate() {
        let mut m = PersistenceMetrics::default();
        // One crash recovery out of three recorded lifecycles -> 1/3.
        m.record_crash_recovery();
        m.record_clean_shutdown();
        m.record_clean_shutdown();
        assert_eq!(m.crash_recovery_rate(), 1.0 / 3.0);
    }
}

View File

@@ -0,0 +1,189 @@
//! Database migration system
//!
//! Provides versioned schema migrations for SQLite database evolution.
use rusqlite::Connection;
use crate::persistence::error::Result;
/// Migration metadata
///
/// Each migration is applied at most once, inside its own transaction, and is
/// recorded by version in the `schema_migrations` table.
#[derive(Debug, Clone)]
pub struct Migration {
    /// Migration version number (unique; used as the applied/pending key)
    pub version: i64,
    /// Migration name/description
    pub name: &'static str,
    /// SQL statements to apply (executed as a single batch)
    pub up: &'static str,
}
/// All available migrations in order
///
/// NOTE(review): versions jump from 1 to 4 - presumably migrations 2 and 3
/// were removed or never shipped; confirm no deployed database recorded
/// those versions before relying on this list.
pub const MIGRATIONS: &[Migration] = &[
    Migration {
        version: 1,
        name: "initial_schema",
        up: include_str!("migrations/001_initial_schema.sql"),
    },
    Migration {
        version: 4,
        name: "sessions",
        up: include_str!("migrations/004_sessions.sql"),
    },
];
/// Create the `schema_migrations` bookkeeping table if it does not exist yet.
fn create_migrations_table(conn: &Connection) -> Result<()> {
    conn.execute(
        "CREATE TABLE IF NOT EXISTS schema_migrations (
            version INTEGER PRIMARY KEY,
            name TEXT NOT NULL,
            applied_at INTEGER NOT NULL
        )",
        [],
    )
    .map(|_| ())
    .map_err(Into::into)
}
/// Get the current schema version
///
/// Returns the highest applied migration version, or 0 when none have been
/// applied yet.
///
/// # Errors
/// Propagates database errors. The `COALESCE(MAX(..), 0)` aggregate always
/// yields exactly one row, so any failure here is a genuine I/O or
/// corruption problem; the previous `.unwrap_or(0)` silently masked such
/// errors as "version 0".
pub fn get_current_version(conn: &Connection) -> Result<i64> {
    create_migrations_table(conn)?;
    let version = conn.query_row(
        "SELECT COALESCE(MAX(version), 0) FROM schema_migrations",
        [],
        |row| row.get(0),
    )?;
    Ok(version)
}
/// Check whether the migration with `version` is recorded as applied.
fn is_migration_applied(conn: &Connection, version: i64) -> Result<bool> {
    let count = conn.query_row(
        "SELECT COUNT(*) FROM schema_migrations WHERE version = ?1",
        [version],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(count > 0)
}
/// Apply a single migration
fn apply_migration(conn: &mut Connection, migration: &Migration) -> Result<()> {
tracing::info!(
"Applying migration {} ({})",
migration.version,
migration.name
);
let tx = conn.transaction()?;
// Execute the migration SQL
tx.execute_batch(migration.up)?;
// Record that we applied this migration
tx.execute(
"INSERT INTO schema_migrations (version, name, applied_at)
VALUES (?1, ?2, ?3)",
rusqlite::params![
migration.version,
migration.name,
chrono::Utc::now().timestamp(),
],
)?;
tx.commit()?;
tracing::info!(
"Migration {} ({}) applied successfully",
migration.version,
migration.name
);
Ok(())
}
/// Run every migration in [`MIGRATIONS`] that has not been applied yet.
pub fn run_migrations(conn: &mut Connection) -> Result<()> {
    create_migrations_table(conn)?;
    let current_version = get_current_version(conn)?;
    tracing::info!("Current schema version: {}", current_version);
    let mut applied_count = 0usize;
    for migration in MIGRATIONS {
        if is_migration_applied(conn, migration.version)? {
            continue;
        }
        apply_migration(conn, migration)?;
        applied_count += 1;
    }
    match applied_count {
        0 => tracing::debug!("No pending migrations"),
        n => tracing::info!("Applied {} migration(s)", n),
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use rusqlite::Connection;

    use super::*;

    #[test]
    fn test_migration_system() {
        let mut db = Connection::open_in_memory().unwrap();
        // A fresh database reports version 0.
        assert_eq!(get_current_version(&db).unwrap(), 0);
        // Applying migrations brings us to the newest version...
        run_migrations(&mut db).unwrap();
        let newest = MIGRATIONS.last().unwrap().version;
        assert_eq!(get_current_version(&db).unwrap(), newest);
        // ...and re-running is a no-op.
        run_migrations(&mut db).unwrap();
        assert_eq!(get_current_version(&db).unwrap(), newest);
    }

    #[test]
    fn test_migrations_table_created() {
        let db = Connection::open_in_memory().unwrap();
        create_migrations_table(&db).unwrap();
        // The bookkeeping table exists and starts out empty.
        let rows: i64 = db
            .query_row("SELECT COUNT(*) FROM schema_migrations", [], |row| {
                row.get(0)
            })
            .unwrap();
        assert_eq!(rows, 0);
    }

    #[test]
    fn test_is_migration_applied() {
        let db = Connection::open_in_memory().unwrap();
        create_migrations_table(&db).unwrap();
        // Not applied until a row for version 1 exists.
        assert!(!is_migration_applied(&db, 1).unwrap());
        db.execute(
            "INSERT INTO schema_migrations (version, name, applied_at) VALUES (1, 'test', 0)",
            [],
        )
        .unwrap();
        assert!(is_migration_applied(&db, 1).unwrap());
    }
}

View File

@@ -0,0 +1,62 @@
-- Migration 001: Initial schema
-- Creates the base tables for entity persistence and CRDT sync

-- Entities table - stores entity metadata
-- One row per persisted entity; component payloads live in `components`.
CREATE TABLE IF NOT EXISTS entities (
    id BLOB PRIMARY KEY,
    entity_type TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- Components table - stores serialized component data
-- Keyed by (entity, component type), so re-saving a component replaces it.
-- NOTE(review): ON DELETE CASCADE only fires when the connection runs with
-- PRAGMA foreign_keys = ON (SQLite defaults to OFF) - confirm it is enabled.
CREATE TABLE IF NOT EXISTS components (
    entity_id BLOB NOT NULL,
    component_type TEXT NOT NULL,
    data BLOB NOT NULL,
    updated_at INTEGER NOT NULL,
    PRIMARY KEY (entity_id, component_type),
    FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
);

-- Index for querying components by entity
CREATE INDEX IF NOT EXISTS idx_components_entity
    ON components(entity_id);

-- Operation log - for CRDT sync protocol
-- Duplicate (node_id, sequence_number) pairs are rejected by the UNIQUE
-- constraint.
CREATE TABLE IF NOT EXISTS operation_log (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    node_id TEXT NOT NULL,
    sequence_number INTEGER NOT NULL,
    operation BLOB NOT NULL,
    timestamp INTEGER NOT NULL,
    UNIQUE(node_id, sequence_number)
);

-- Index for efficient operation log queries
CREATE INDEX IF NOT EXISTS idx_oplog_node_seq
    ON operation_log(node_id, sequence_number);

-- Vector clock table - for causality tracking (one counter per node)
CREATE TABLE IF NOT EXISTS vector_clock (
    node_id TEXT PRIMARY KEY,
    counter INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- Session state table - for crash detection (simple key/value store)
CREATE TABLE IF NOT EXISTS session_state (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at INTEGER NOT NULL
);

-- WAL checkpoint tracking
-- Single-row table: the row is pinned to rowid = 1 by the insert below.
CREATE TABLE IF NOT EXISTS checkpoint_state (
    last_checkpoint INTEGER NOT NULL,
    wal_size_bytes INTEGER NOT NULL
);

-- Initialize checkpoint state if not exists
INSERT OR IGNORE INTO checkpoint_state (rowid, last_checkpoint, wal_size_bytes)
VALUES (1, strftime('%s', 'now'), 0);

View File

@@ -0,0 +1,51 @@
-- Migration 004: Add session support
-- Adds session tables and session-scoping columns to the existing tables.
-- NOTE(review): the ALTER TABLE ... ADD COLUMN statements are not idempotent
-- (unlike CREATE ... IF NOT EXISTS); this file relies on the migration
-- runner recording applied versions so it executes exactly once.

-- Sessions table
CREATE TABLE IF NOT EXISTS sessions (
    id BLOB PRIMARY KEY,
    code TEXT NOT NULL,
    name TEXT,
    created_at INTEGER NOT NULL,
    last_active INTEGER NOT NULL,
    entity_count INTEGER NOT NULL DEFAULT 0,
    state TEXT NOT NULL,
    secret BLOB,
    UNIQUE(id),
    UNIQUE(code)
);

-- Index for finding recent sessions
CREATE INDEX IF NOT EXISTS idx_sessions_last_active
    ON sessions(last_active DESC);

-- Session membership (which node was in which session)
-- left_at is presumably NULL while the node is still a member - confirm.
CREATE TABLE IF NOT EXISTS session_membership (
    session_id BLOB NOT NULL,
    node_id TEXT NOT NULL,
    joined_at INTEGER NOT NULL,
    left_at INTEGER,
    PRIMARY KEY (session_id, node_id),
    FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE
);

-- Add session_id to entities table (NULL for pre-session rows)
ALTER TABLE entities ADD COLUMN session_id BLOB;

-- Index for session-scoped entity queries
CREATE INDEX IF NOT EXISTS idx_entities_session
    ON entities(session_id);

-- Add session_id to vector_clock
ALTER TABLE vector_clock ADD COLUMN session_id BLOB;

-- Composite index for session + node lookups
CREATE INDEX IF NOT EXISTS idx_vector_clock_session_node
    ON vector_clock(session_id, node_id);

-- Add session_id to operation_log
ALTER TABLE operation_log ADD COLUMN session_id BLOB;

-- Index for session-scoped operation queries
CREATE INDEX IF NOT EXISTS idx_operation_log_session
    ON operation_log(session_id, node_id, sequence_number);

View File

@@ -0,0 +1,55 @@
//! Persistence layer for battery-efficient state management
//!
//! This module implements the persistence strategy defined in RFC 0002.
//! It provides a three-tier system to minimize disk I/O while maintaining data
//! durability:
//!
//! 1. **In-Memory Dirty Tracking** - Track changes without writing immediately
//! 2. **Write Buffer** - Batch and coalesce operations before writing
//! 3. **SQLite with WAL Mode** - Controlled checkpoints to minimize fsync()
//! calls
//!
//! # Example
//!
//! ```no_run
//! use bevy::prelude::*;
//! use libmarathon::persistence::*;
//!
//! fn setup(mut commands: Commands) {
//! // Spawn an entity with the Persisted marker
//! commands.spawn(Persisted::new());
//! }
//!
//! // The persistence plugin automatically tracks changes to Persisted components
//! fn main() {
//! App::new()
//! .add_plugins(DefaultPlugins)
//! .add_plugins(PersistencePlugin::new("app.db"))
//! .add_systems(Startup, setup)
//! .run();
//! }
//! ```
mod config;
mod database;
mod error;
mod health;
mod lifecycle;
mod metrics;
mod migrations;
mod plugin;
pub mod reflection;
mod systems;
mod types;
pub use config::*;
pub use database::*;
pub use error::*;
pub use health::*;
pub use lifecycle::*;
pub use metrics::*;
pub use migrations::*;
pub use plugin::*;
pub use reflection::*;
pub use systems::*;
pub use types::*;

View File

@@ -0,0 +1,313 @@
//! Bevy plugin for the persistence layer
//!
//! This module provides a Bevy plugin that sets up all the necessary resources
//! and systems for the persistence layer.
use std::{
ops::{
Deref,
DerefMut,
},
path::PathBuf,
};
use bevy::prelude::*;
use crate::persistence::*;
/// Bevy plugin for persistence
///
/// Wires the database, write buffer, metrics, health tracking and the
/// flush/checkpoint systems into a Bevy app (see the `Plugin` impl).
///
/// # Example
///
/// ```no_run
/// use bevy::prelude::*;
/// use libmarathon::persistence::PersistencePlugin;
///
/// App::new()
///     .add_plugins(PersistencePlugin::new("app.db"))
///     .run();
/// ```
pub struct PersistencePlugin {
    /// Path to the SQLite database file
    pub db_path: PathBuf,
    /// Persistence configuration
    pub config: PersistenceConfig,
}
impl PersistencePlugin {
    /// Create a new persistence plugin with the default configuration.
    pub fn new(db_path: impl Into<PathBuf>) -> Self {
        Self::with_config(db_path, PersistenceConfig::default())
    }

    /// Create a new persistence plugin with a custom configuration.
    pub fn with_config(db_path: impl Into<PathBuf>, config: PersistenceConfig) -> Self {
        Self {
            db_path: db_path.into(),
            config,
        }
    }

    /// Create a plugin whose configuration is loaded from a TOML file.
    ///
    /// # Errors
    /// Returns an error when the configuration file cannot be loaded.
    pub fn with_config_file(
        db_path: impl Into<PathBuf>,
        config_path: impl AsRef<std::path::Path>,
    ) -> crate::persistence::error::Result<Self> {
        let config = load_config_from_file(config_path)?;
        Ok(Self::with_config(db_path, config))
    }
}
impl Plugin for PersistencePlugin {
    // Registers everything the persistence layer needs: database resource,
    // message types, buffers/metrics/health resources, the startup system and
    // the chained per-frame pipeline.
    fn build(&self, app: &mut App) {
        // Initialize database. NOTE: this panics (expect) if the database
        // cannot be opened, aborting app construction.
        let db = PersistenceDb::from_path(&self.db_path)
            .expect("Failed to initialize persistence database");
        // Register types for reflection
        app.register_type::<Persisted>();
        // Add messages/events
        app.add_message::<PersistenceFailureEvent>()
            .add_message::<PersistenceRecoveryEvent>()
            .add_message::<AppLifecycleEvent>();
        // Insert resources
        app.insert_resource(db)
            .insert_resource(DirtyEntitiesResource::default())
            .insert_resource(WriteBufferResource::new(self.config.max_buffer_operations))
            .insert_resource(self.config.clone())
            .insert_resource(BatteryStatus::default())
            .insert_resource(PersistenceMetrics::default())
            .insert_resource(CheckpointTimer::default())
            .insert_resource(PersistenceHealth::default())
            .insert_resource(PendingFlushTasks::default());
        // Add startup system
        app.add_systems(Startup, persistence_startup_system);
        // Update-schedule pipeline; .chain() enforces this exact run order:
        // lifecycle events -> dirty collection -> flush -> checkpoint.
        app.add_systems(
            Update,
            (
                lifecycle_event_system,
                collect_dirty_entities_bevy_system,
                flush_system,
                checkpoint_bevy_system,
            )
                .chain(),
        );
    }
}
/// Resource wrapper for DirtyEntities
#[derive(Resource, Default)]
pub struct DirtyEntitiesResource(pub DirtyEntities);
impl std::ops::Deref for DirtyEntitiesResource {
type Target = DirtyEntities;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for DirtyEntitiesResource {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Resource wrapper for WriteBuffer
#[derive(Resource)]
pub struct WriteBufferResource(pub WriteBuffer);
impl WriteBufferResource {
pub fn new(max_operations: usize) -> Self {
Self(WriteBuffer::new(max_operations))
}
}
impl std::ops::Deref for WriteBufferResource {
type Target = WriteBuffer;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for WriteBufferResource {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Startup system to initialize persistence
fn persistence_startup_system(db: Res<PersistenceDb>, mut metrics: ResMut<PersistenceMetrics>) {
if let Err(e) = startup_system(db.deref(), metrics.deref_mut()) {
error!("Failed to initialize persistence: {}", e);
} else {
info!("Persistence system initialized");
}
}
/// System to collect dirty entities using Bevy's change detection
///
/// This system tracks changes to the `Persisted` component. When `Persisted`
/// is marked as changed (via `set_changed()` or direct mutation), ALL
/// components on that entity are serialized and added to the write buffer.
///
/// For automatic tracking without manual marking, use
/// `auto_track_transform_changes_system`, which flags `Persisted` whenever
/// the entity's `Transform` changes.
fn collect_dirty_entities_bevy_system(world: &mut World) {
    // Snapshot the changed entities first so the query borrow on `world` is
    // released before we take mutable resource access below.
    let changed_entities: Vec<(Entity, uuid::Uuid)> = {
        let mut query = world.query_filtered::<(Entity, &Persisted), Changed<Persisted>>();
        query
            .iter(world)
            .map(|(entity, persisted)| (entity, persisted.network_id))
            .collect()
    };
    if changed_entities.is_empty() {
        return;
    }
    // Serialize components for each entity
    for (entity, network_id) in changed_entities {
        // First, ensure the entity exists in the database
        {
            let now = chrono::Utc::now();
            let mut write_buffer = world.resource_mut::<WriteBufferResource>();
            if let Err(e) = write_buffer.add(PersistenceOp::UpsertEntity {
                id: network_id,
                data: EntityData {
                    id: network_id,
                    created_at: now,
                    updated_at: now,
                    entity_type: "NetworkedEntity".to_string(),
                },
            }) {
                error!(
                    "Failed to add UpsertEntity operation for {}: {}",
                    network_id, e
                );
                // BUGFIX: was `return`, which aborted processing of ALL
                // remaining changed entities (their change ticks had already
                // been consumed, silently dropping their data). Skip only
                // this entity instead.
                continue;
            }
        }
        // Serialize all components on this entity (generic tracking)
        let components = {
            let type_registry = world.resource::<AppTypeRegistry>().read();
            let comps = serialize_all_components_from_entity(entity, world, &type_registry);
            drop(type_registry);
            comps
        };
        // Add operations for each component
        for (component_type, data) in components {
            // Mark the (entity, component) pair dirty...
            {
                let mut dirty = world.resource_mut::<DirtyEntitiesResource>();
                dirty.mark_dirty(network_id, &component_type);
            }
            // ...then enqueue the upsert in the write buffer.
            {
                let mut write_buffer = world.resource_mut::<WriteBufferResource>();
                if let Err(e) = write_buffer.add(PersistenceOp::UpsertComponent {
                    entity_id: network_id,
                    component_type: component_type.clone(),
                    data,
                }) {
                    error!(
                        "Failed to add UpsertComponent operation for entity {} component {}: {}",
                        network_id, component_type, e
                    );
                    // Continue with other components even if one fails
                }
            }
        }
    }
}
/// System to automatically track changes to common Bevy components
///
/// Detects changes to `Transform` and flags the entity's `Persisted`
/// component as changed, which triggers serialization in the dirty-entity
/// collection system.
///
/// Add this system to your app if you want automatic persistence of Transform
/// changes:
///
/// ```no_run
/// # use bevy::prelude::*;
/// # use libmarathon::persistence::*;
/// App::new()
///     .add_plugins(PersistencePlugin::new("app.db"))
///     .add_systems(Update, auto_track_transform_changes_system)
///     .run();
/// ```
pub fn auto_track_transform_changes_system(
    mut query: Query<&mut Persisted, (With<Transform>, Changed<Transform>)>,
) {
    for mut persisted in query.iter_mut() {
        // BUGFIX: Bevy's change detection fires on mutable *dereference* of a
        // `Mut<T>`, not on merely iterating a `&mut` query - the previous
        // body iterated with an unused binding and never flagged anything.
        // Mark the component explicitly.
        persisted.set_changed();
    }
}
/// System to checkpoint the WAL
///
/// Delegates to `checkpoint_system` and records the outcome in the health
/// tracker; failures are logged with the consecutive-failure count (which
/// already includes this attempt).
fn checkpoint_bevy_system(
    db: Res<PersistenceDb>,
    config: Res<PersistenceConfig>,
    mut timer: ResMut<CheckpointTimer>,
    mut metrics: ResMut<PersistenceMetrics>,
    mut health: ResMut<PersistenceHealth>,
) {
    let outcome = checkpoint_system(
        db.deref(),
        config.deref(),
        timer.deref_mut(),
        metrics.deref_mut(),
    );
    if let Err(e) = outcome {
        health.record_checkpoint_failure();
        error!(
            "Failed to checkpoint WAL (attempt {}): {}",
            health.consecutive_checkpoint_failures, e
        );
    } else {
        health.record_checkpoint_success();
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_plugin_creation() {
        // `new` should store the path verbatim.
        let plugin = PersistencePlugin::new("test.db");
        assert_eq!(plugin.db_path, PathBuf::from("test.db"));
    }

    #[test]
    fn test_plugin_with_config() {
        // A custom config must be carried through unchanged.
        let config = PersistenceConfig {
            flush_interval_secs: 5,
            ..Default::default()
        };
        let plugin = PersistencePlugin::with_config("test.db", config);
        assert_eq!(plugin.config.flush_interval_secs, 5);
    }
}

View File

@@ -0,0 +1,313 @@
//! Reflection-based component serialization for persistence
//!
//! This module provides utilities to serialize and deserialize Bevy components
//! using reflection, allowing the persistence layer to work with any component
//! that implements Reflect.
use bevy::{
prelude::*,
reflect::{
TypeRegistry,
serde::{
ReflectSerializer,
TypedReflectDeserializer,
TypedReflectSerializer,
},
},
};
use bincode::Options as _;
use serde::de::DeserializeSeed;
use crate::persistence::error::{
PersistenceError,
Result,
};
/// Marker component to indicate that an entity should be persisted
///
/// Add this component to any entity that should have its state persisted to
/// disk. The persistence system will automatically serialize all components on
/// entities with this marker when they change.
///
/// # Triggering Persistence
///
/// To trigger persistence after modifying components on an entity, access
/// `Persisted` mutably through a query. Bevy's change detection will
/// automatically mark it as changed:
///
/// ```no_run
/// # use bevy::prelude::*;
/// # use libmarathon::persistence::*;
/// fn update_position(mut query: Query<(&mut Transform, &mut Persisted)>) {
///     for (mut transform, mut persisted) in query.iter_mut() {
///         transform.translation.x += 1.0;
///         // Accessing &mut Persisted triggers change detection automatically
///     }
/// }
/// ```
///
/// Alternatively, use `auto_track_transform_changes_system` for automatic
/// persistence of Transform changes without manual queries.
///
/// NOTE(review): the derived `Default` yields a nil (all-zero) `network_id`,
/// whereas [`Persisted::new`] generates a random v4 UUID - confirm no call
/// site relies on `Persisted::default()` producing a unique id.
#[derive(Component, Reflect, Default)]
#[reflect(Component)]
pub struct Persisted {
    /// Unique network ID for this entity
    pub network_id: uuid::Uuid,
}
impl Persisted {
    /// Create a marker with a freshly generated random (v4) network id.
    pub fn new() -> Self {
        Self::with_id(uuid::Uuid::new_v4())
    }

    /// Create a marker that reuses an already-known network id.
    pub fn with_id(network_id: uuid::Uuid) -> Self {
        Self { network_id }
    }
}
/// Trait for components that can be persisted
pub trait Persistable: Component + Reflect {
    /// Get the type name for this component (used as key in database)
    ///
    /// NOTE(review): `std::any::type_name` output is explicitly not guaranteed
    /// to be stable across compiler versions; if these strings are stored as
    /// database keys, a toolchain upgrade could orphan existing rows - confirm
    /// whether a stable, hand-assigned key should be used instead.
    fn type_name() -> &'static str {
        std::any::type_name::<Self>()
    }
}
/// Serialize a component using Bevy's reflection system
///
/// Converts any component implementing `Reflect` into a bincode byte buffer,
/// embedding type information from the registry so the value can later be
/// restored by `deserialize_component`.
///
/// # Parameters
/// - `component`: Component to serialize (must implement `Reflect`)
/// - `type_registry`: Bevy's type registry for reflection metadata
///
/// # Returns
/// - `Ok(Vec<u8>)`: Serialized component data
/// - `Err`: If serialization fails (e.g. the type is not properly registered)
///
/// # Examples
/// ```no_run
/// # use bevy::prelude::*;
/// # use libmarathon::persistence::*;
/// # fn example(component: &Transform, registry: &AppTypeRegistry) -> anyhow::Result<()> {
/// let registry = registry.read();
/// let bytes = serialize_component(component.as_reflect(), &registry)?;
/// # Ok(())
/// # }
/// ```
pub fn serialize_component(
    component: &dyn Reflect,
    type_registry: &TypeRegistry,
) -> Result<Vec<u8>> {
    let reflect_serializer = ReflectSerializer::new(component, type_registry);
    let encoded = bincode::options().serialize(&reflect_serializer);
    encoded.map_err(PersistenceError::from)
}
/// Serialize a component when the type is known (more efficient for bincode)
///
/// Uses `TypedReflectSerializer`, which omits the type-path envelope, so the
/// output must be read back with `TypedReflectDeserializer`.
pub fn serialize_component_typed(
    component: &dyn Reflect,
    type_registry: &TypeRegistry,
) -> Result<Vec<u8>> {
    let codec = bincode::options();
    codec
        .serialize(&TypedReflectSerializer::new(component, type_registry))
        .map_err(PersistenceError::from)
}
/// Deserialize bytes produced by [`serialize_component`] back into a
/// reflected value.
///
/// The result is type-erased (`Box<dyn PartialReflect>`); callers downcast
/// to the concrete type as needed.
///
/// # Parameters
/// - `bytes`: Serialized component data from [`serialize_component`]
/// - `type_registry`: Bevy's type registry for reflection metadata
///
/// # Returns
/// - `Ok(Box<dyn PartialReflect>)`: Deserialized component (needs downcasting)
/// - `Err`: If deserialization fails (e.g., type not registered, data
///   corruption)
///
/// # Examples
/// ```no_run
/// # use bevy::prelude::*;
/// # use libmarathon::persistence::*;
/// # fn example(bytes: &[u8], registry: &AppTypeRegistry) -> anyhow::Result<()> {
/// let registry = registry.read();
/// let reflected = deserialize_component(bytes, &registry)?;
/// // Downcast to concrete type as needed
/// # Ok(())
/// # }
/// ```
pub fn deserialize_component(
    bytes: &[u8],
    type_registry: &TypeRegistry,
) -> Result<Box<dyn PartialReflect>> {
    let reflect_deserializer = bevy::reflect::serde::ReflectDeserializer::new(type_registry);
    let mut bincode_deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
    match reflect_deserializer.deserialize(&mut bincode_deserializer) {
        Ok(value) => Ok(value),
        Err(e) => Err(PersistenceError::Deserialization(e.to_string())),
    }
}
/// Deserialize bytes produced by [`serialize_component_typed`] when the
/// component's type path is known.
///
/// Looks up the type registration for `component_type` and decodes with
/// `TypedReflectDeserializer`, which is the binary-format-friendly
/// counterpart of the typed serializer.
pub fn deserialize_component_typed(
    bytes: &[u8],
    component_type: &str,
    type_registry: &TypeRegistry,
) -> Result<Box<dyn PartialReflect>> {
    // The type must be registered for reflection-based decoding to work.
    let Some(registration) = type_registry.get_with_type_path(component_type) else {
        return Err(PersistenceError::Deserialization(format!(
            "Type {} not registered",
            component_type
        )));
    };
    let reflect_deserializer = TypedReflectDeserializer::new(registration, type_registry);
    let mut bincode_deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
    match reflect_deserializer.deserialize(&mut bincode_deserializer) {
        Ok(value) => Ok(value),
        Err(e) => Err(PersistenceError::Deserialization(e.to_string())),
    }
}
/// Serialize one component directly off an entity, looked up by type path.
///
/// Convenience wrapper combining registry lookup, reflection and
/// [`serialize_component`]; used by the persistence system to save component
/// state without knowing the concrete type at compile time.
///
/// # Parameters
/// - `entity`: Bevy entity to read the component from
/// - `component_type`: Type path string (e.g.,
///   "bevy_transform::components::Transform")
/// - `world`: Bevy world containing the entity
/// - `type_registry`: Bevy's type registry for reflection metadata
///
/// # Returns
/// - `Some(Vec<u8>)`: Serialized component data
/// - `None`: If entity doesn't have the component, the type isn't
///   registered, or serialization fails
///
/// # Examples
/// ```no_run
/// # use bevy::prelude::*;
/// # use libmarathon::persistence::*;
/// # fn example(entity: Entity, world: &World, registry: &AppTypeRegistry) -> Option<()> {
/// let registry = registry.read();
/// let bytes = serialize_component_from_entity(
///     entity,
///     "bevy_transform::components::Transform",
///     world,
///     &registry,
/// )?;
/// # Some(())
/// # }
/// ```
pub fn serialize_component_from_entity(
    entity: Entity,
    component_type: &str,
    world: &World,
    type_registry: &TypeRegistry,
) -> Option<Vec<u8>> {
    // Registry lookup + ReflectComponent data in one chain; either missing
    // step short-circuits to None.
    let reflect_component = type_registry
        .get_with_type_path(component_type)?
        .data::<ReflectComponent>()?;
    // Reflect the live component off the entity, then serialize it.
    let reflected = reflect_component.reflect(world.entity(entity))?;
    serialize_component(reflected, type_registry).ok()
}
/// Serialize every reflectable component on an entity.
///
/// Walks the type registry and, for each registration that has
/// `ReflectComponent` data and is present on the entity, serializes it with
/// the typed format (matching `deserialize_component_typed`). The
/// `Persisted` marker itself is skipped to avoid redundant storage;
/// components that fail to serialize are silently omitted.
///
/// # Parameters
/// - `entity`: Bevy entity to serialize components from
/// - `world`: Bevy world containing the entity
/// - `type_registry`: Bevy's type registry for reflection metadata
///
/// # Returns
/// Vector of `(component_type_path, serialized_data)` tuples
pub fn serialize_all_components_from_entity(
    entity: Entity,
    world: &World,
    type_registry: &TypeRegistry,
) -> Vec<(String, Vec<u8>)> {
    let entity_ref = world.entity(entity);
    type_registry
        .iter()
        .filter_map(|registration| {
            // Non-components (no ReflectComponent data) are skipped.
            let reflect_component = registration.data::<ReflectComponent>()?;
            let type_path = registration.type_info().type_path();
            // Skip the Persisted marker itself.
            // NOTE(review): suffix match would also skip any other type
            // named `Persisted` — confirm an exact-path check isn't needed.
            if type_path.ends_with("::Persisted") {
                return None;
            }
            // Only entities actually carrying the component yield data.
            let reflected = reflect_component.reflect(entity_ref)?;
            let data = serialize_component_typed(reflected, type_registry).ok()?;
            Some((type_path.to_string(), data))
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal reflectable component used to exercise serialization.
    #[derive(Component, Reflect, Default)]
    #[reflect(Component)]
    struct TestComponent {
        value: i32,
    }

    /// A registered component serializes to a non-empty byte buffer.
    #[test]
    fn test_component_serialization() -> Result<()> {
        let mut registry = TypeRegistry::default();
        registry.register::<TestComponent>();
        let component = TestComponent { value: 42 };
        let bytes = serialize_component(&component, &registry)?;
        assert!(!bytes.is_empty());
        Ok(())
    }
}

View File

@@ -0,0 +1,495 @@
//! Bevy systems for the persistence layer
//!
//! This module provides systems that integrate the persistence layer with
//! Bevy's ECS. These systems handle dirty tracking, write buffering, and
//! flushing to SQLite.
use std::{
sync::{
Arc,
Mutex,
},
time::Instant,
};
use bevy::{
prelude::*,
tasks::{
IoTaskPool,
Task,
},
};
use futures_lite::future;
use rusqlite::Connection;
use crate::persistence::{
error::Result,
*,
};
/// Resource wrapping the SQLite connection
///
/// The connection is shared behind `Arc<Mutex<_>>` so clones of this
/// resource (e.g. the ones moved into async flush tasks) all operate on the
/// same underlying database handle. Acquire it via [`PersistenceDb::lock`],
/// which converts mutex poisoning into a recoverable error.
#[derive(Clone, bevy::prelude::Resource)]
pub struct PersistenceDb {
    /// Shared SQLite connection; lock it for the shortest scope possible.
    pub conn: Arc<Mutex<Connection>>,
}
impl PersistenceDb {
    /// Wrap an already-open connection in the shared handle.
    pub fn new(conn: Connection) -> Self {
        let conn = Arc::new(Mutex::new(conn));
        Self { conn }
    }

    /// Open and initialize the persistence database at `path`.
    pub fn from_path(path: impl AsRef<std::path::Path>) -> Result<Self> {
        initialize_persistence_db(path).map(Self::new)
    }

    /// Open an in-memory database with the persistence schema applied
    /// (used primarily in tests).
    pub fn in_memory() -> Result<Self> {
        let conn = Connection::open_in_memory()?;
        configure_sqlite_for_persistence(&conn)?;
        create_persistence_schema(&conn)?;
        Ok(Self::new(conn))
    }

    /// Acquire the database connection with proper error handling.
    ///
    /// If a thread panicked while holding the mutex, the lock is poisoned;
    /// rather than propagating the panic, this converts the condition into a
    /// recoverable `PersistenceError`.
    ///
    /// # Returns
    /// - `Ok(MutexGuard<Connection>)`: Locked connection ready for use
    /// - `Err(PersistenceError)`: If mutex is poisoned
    pub fn lock(&self) -> Result<std::sync::MutexGuard<'_, Connection>> {
        match self.conn.lock() {
            Ok(guard) => Ok(guard),
            Err(e) => Err(PersistenceError::Other(format!(
                "Database connection mutex poisoned: {}",
                e
            ))),
        }
    }
}
/// Resource for tracking when the last checkpoint occurred
///
/// Read and reset by `checkpoint_system` to decide whether the checkpoint
/// interval has elapsed.
#[derive(Debug, bevy::prelude::Resource)]
pub struct CheckpointTimer {
    /// Instant of the most recent successful WAL checkpoint.
    pub last_checkpoint: Instant,
}
impl Default for CheckpointTimer {
    /// Start the timer at creation time, so the first checkpoint waits a
    /// full interval rather than firing immediately.
    fn default() -> Self {
        let last_checkpoint = Instant::now();
        Self { last_checkpoint }
    }
}
/// Resource for tracking pending async flush tasks
///
/// Polled (non-blockingly) by `flush_system` each run, and fully drained by
/// `shutdown_system` so no in-flight writes are lost on exit.
#[derive(Default, bevy::prelude::Resource)]
pub struct PendingFlushTasks {
    /// In-flight flush tasks spawned on the I/O task pool.
    pub tasks: Vec<Task<Result<FlushResult>>>,
}
/// Result of an async flush operation
#[derive(Debug, Clone)]
pub struct FlushResult {
    /// Number of operations written by this flush.
    pub operations_count: usize,
    /// Wall-clock time the flush took.
    pub duration: std::time::Duration,
    /// Approximate payload bytes written (component/operation data only).
    pub bytes_written: u64,
}
/// Helper function to calculate total bytes written from operations
///
/// Only counts payload bytes of `UpsertComponent` and `LogOperation`;
/// all other operation kinds contribute zero.
fn calculate_bytes_written(ops: &[PersistenceOp]) -> u64 {
    let mut total: u64 = 0;
    for op in ops {
        total += match op {
            | PersistenceOp::UpsertComponent { data, .. } => data.len() as u64,
            | PersistenceOp::LogOperation { operation, .. } => operation.len() as u64,
            | _ => 0,
        };
    }
    total
}
/// Flush operations to SQLite synchronously, recording metrics.
///
/// Blocks the caller; reserved for paths that must not return until data is
/// on disk (shutdown, forced flushes). No-op for an empty slice.
fn perform_flush_sync(
    ops: &[PersistenceOp],
    db: &PersistenceDb,
    metrics: &mut PersistenceMetrics,
) -> Result<()> {
    if ops.is_empty() {
        return Ok(());
    }
    let bytes_written = calculate_bytes_written(ops);
    let start = Instant::now();
    let mut conn = db.lock()?;
    let count = flush_to_sqlite(ops, &mut conn)?;
    // Release the connection before touching metrics.
    drop(conn);
    metrics.record_flush(count, start.elapsed(), bytes_written);
    Ok(())
}
/// Flush operations to SQLite without blocking the async runtime.
///
/// The blocking SQLite work is handed to `blocking::unblock`, which runs it
/// on a dedicated thread pool. That keeps this future runtime-agnostic: it
/// works under Bevy's async-executor today and would work under tokio (e.g.
/// a dedicated iOS runtime) unchanged.
async fn perform_flush_async(ops: Vec<PersistenceOp>, db: PersistenceDb) -> Result<FlushResult> {
    // Nothing to write: report an empty, zero-duration result.
    if ops.is_empty() {
        let empty = FlushResult {
            operations_count: 0,
            duration: std::time::Duration::ZERO,
            bytes_written: 0,
        };
        return Ok(empty);
    }
    let bytes_written = calculate_bytes_written(&ops);
    // Move ops + db into the blocking closure; only the timing and count
    // come back out.
    let (operations_count, duration) = blocking::unblock(move || {
        let start = Instant::now();
        let mut conn = db.lock()?;
        let count = flush_to_sqlite(&ops, &mut conn)?;
        drop(conn);
        Ok::<_, crate::persistence::PersistenceError>((count, start.elapsed()))
    })
    .await?;
    Ok(FlushResult {
        operations_count,
        duration,
        bytes_written,
    })
}
/// System to flush the write buffer to SQLite asynchronously
///
/// Runs on a schedule derived from the configuration and battery status.
/// It first polls previously-spawned flush tasks (updating metrics, health
/// and failure/recovery events), then — circuit breaker permitting — spawns
/// a new async flush if the buffer says it is due.
pub fn flush_system(
    mut write_buffer: ResMut<WriteBufferResource>,
    db: Res<PersistenceDb>,
    config: Res<PersistenceConfig>,
    battery: Res<BatteryStatus>,
    mut metrics: ResMut<PersistenceMetrics>,
    mut pending_tasks: ResMut<PendingFlushTasks>,
    mut health: ResMut<PersistenceHealth>,
    mut failure_events: MessageWriter<PersistenceFailureEvent>,
    mut recovery_events: MessageWriter<PersistenceRecoveryEvent>,
) {
    // First, poll and handle completed async flush tasks.
    // `poll_once` resolves immediately, so block_on here never waits on I/O.
    pending_tasks.tasks.retain_mut(|task| {
        if let Some(result) = future::block_on(future::poll_once(task)) {
            match result {
                | Ok(flush_result) => {
                    let previous_failures = health.consecutive_flush_failures;
                    health.record_flush_success();
                    // Update metrics
                    metrics.record_flush(
                        flush_result.operations_count,
                        flush_result.duration,
                        flush_result.bytes_written,
                    );
                    // Emit recovery event if we recovered from failures
                    if previous_failures > 0 {
                        recovery_events.write(PersistenceRecoveryEvent { previous_failures });
                    }
                },
                | Err(e) => {
                    health.record_flush_failure();
                    let error_msg = format!("{}", e);
                    error!(
                        "Async flush failed (attempt {}/{}): {}",
                        health.consecutive_flush_failures,
                        PersistenceHealth::CIRCUIT_BREAKER_THRESHOLD,
                        error_msg
                    );
                    // Emit failure event
                    failure_events.write(PersistenceFailureEvent {
                        error: error_msg,
                        consecutive_failures: health.consecutive_flush_failures,
                        circuit_breaker_open: health.circuit_breaker_open,
                    });
                },
            }
            false // Remove completed task
        } else {
            true // Keep pending task
        }
    });
    // Check circuit breaker before spawning new flush
    if !health.should_attempt_operation() {
        return;
    }
    let flush_interval = config.get_flush_interval(battery.level, battery.is_charging);
    // Check if we should flush
    if !write_buffer.should_flush(flush_interval) {
        return;
    }
    // Take operations from buffer
    let ops = write_buffer.take_operations();
    if ops.is_empty() {
        return;
    }
    // Spawn async flush task on the I/O thread pool.
    // FIX: `db_clone` is already owned by the `async move` block, so pass it
    // by value instead of cloning it a second time (redundant Arc clone).
    let task_pool = IoTaskPool::get();
    let db_clone = db.clone();
    let task = task_pool.spawn(async move { perform_flush_async(ops, db_clone).await });
    pending_tasks.tasks.push(task);
    // Update last flush time
    write_buffer.last_flush = Instant::now();
}
/// System to checkpoint the WAL file
///
/// This runs less frequently than flush_system to merge the WAL into the main
/// database. A checkpoint is performed when either the configured interval
/// has elapsed, or the WAL has grown past `max_wal_size_bytes`.
pub fn checkpoint_system(
    db: &PersistenceDb,
    config: &PersistenceConfig,
    timer: &mut CheckpointTimer,
    metrics: &mut PersistenceMetrics,
) -> Result<()> {
    let checkpoint_interval = config.get_checkpoint_interval();
    // Check if it's time to checkpoint
    if timer.last_checkpoint.elapsed() < checkpoint_interval {
        // Also check WAL size
        let wal_size = {
            let conn = db.lock()?;
            get_wal_size(&conn)?
        };
        // NOTE(review): the WAL-size metric is only refreshed on this
        // interval-not-yet-elapsed path; the interval-elapsed path below
        // checkpoints without re-measuring the WAL.
        metrics.update_wal_size(wal_size as u64);
        // Force checkpoint if WAL is too large
        if wal_size < config.max_wal_size_bytes as i64 {
            return Ok(());
        }
    }
    // Perform checkpoint (Passive mode: does not block concurrent
    // readers/writers, may leave pages behind — see `info.busy`)
    let start = Instant::now();
    let info = {
        let mut conn = db.lock()?;
        checkpoint_wal(&mut conn, CheckpointMode::Passive)?
    };
    let duration = start.elapsed();
    // Update metrics
    metrics.record_checkpoint(duration);
    timer.last_checkpoint = Instant::now();
    // Log if checkpoint was busy
    if info.busy {
        tracing::warn!("WAL checkpoint was busy - some pages may not have been checkpointed");
    }
    Ok(())
}
/// System to handle application shutdown
///
/// Performs, in order: (1) waits for every in-flight async flush task so no
/// buffered data is lost, (2) synchronously flushes anything still in the
/// write buffer, (3) truncate-checkpoints the WAL and marks the session as a
/// clean shutdown. All flushing here is synchronous by design — the process
/// is about to exit.
pub fn shutdown_system(
    write_buffer: &mut WriteBuffer,
    db: &PersistenceDb,
    metrics: &mut PersistenceMetrics,
    pending_tasks: Option<&mut PendingFlushTasks>,
) -> Result<()> {
    // CRITICAL: drain in-flight async flushes first; abandoning them would
    // silently drop writes that were already taken out of the buffer.
    if let Some(pending) = pending_tasks {
        info!(
            "Waiting for {} pending flush tasks to complete before shutdown",
            pending.tasks.len()
        );
        for task in pending.tasks.drain(..) {
            match future::block_on(task) {
                | Ok(flush_result) => {
                    metrics.record_flush(
                        flush_result.operations_count,
                        flush_result.duration,
                        flush_result.bytes_written,
                    );
                    debug!(
                        "Pending flush completed: {} operations",
                        flush_result.operations_count
                    );
                },
                | Err(e) => {
                    // A failed task must not abort shutdown; log and move on.
                    error!("Pending flush failed during shutdown: {}", e);
                },
            }
        }
        info!("All pending flush tasks completed");
    }
    // Synchronously write whatever is still buffered.
    let ops = write_buffer.take_operations();
    perform_flush_sync(&ops, db, metrics)?;
    // Fold the WAL into the main database and record the clean shutdown.
    let start = Instant::now();
    {
        let mut conn = db.lock()?;
        checkpoint_wal(&mut conn, CheckpointMode::Truncate)?;
        mark_clean_shutdown(&mut conn)?;
    }
    metrics.record_checkpoint(start.elapsed());
    metrics.record_clean_shutdown();
    Ok(())
}
/// System to initialize persistence on startup
///
/// Detects whether the previous run exited through `shutdown_system` (crash
/// detection) and registers a fresh session id for this run.
pub fn startup_system(db: &PersistenceDb, metrics: &mut PersistenceMetrics) -> Result<()> {
    let mut conn = db.lock()?;
    // Was the previous session marked as a clean shutdown?
    if check_clean_shutdown(&mut conn)? {
        tracing::info!("Previous session shut down cleanly");
    } else {
        tracing::warn!("Previous session did not shut down cleanly - crash detected");
        metrics.record_crash_recovery();
        // No manual recovery is performed yet: SQLite's WAL mode handles
        // crash recovery automatically on open.
    }
    // Register a new session id for this run.
    let session = SessionState::new();
    set_session_state(&mut conn, "session_id", &session.session_id)?;
    Ok(())
}
/// Force an immediate, synchronous flush of the write buffer.
///
/// Blocks until the data is on disk; intended for critical moments such as
/// iOS background/suspension events where an async flush might not finish.
pub fn force_flush(
    write_buffer: &mut WriteBuffer,
    db: &PersistenceDb,
    metrics: &mut PersistenceMetrics,
) -> Result<()> {
    perform_flush_sync(&write_buffer.take_operations(), db, metrics)?;
    write_buffer.last_flush = Instant::now();
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: an in-memory database accepts a flushed entity upsert.
    #[test]
    fn test_persistence_db_in_memory() -> Result<()> {
        let db = PersistenceDb::in_memory()?;
        // Verify we can write and read
        let entity_id = uuid::Uuid::new_v4();
        let ops = vec![PersistenceOp::UpsertEntity {
            id: entity_id,
            data: EntityData {
                id: entity_id,
                created_at: chrono::Utc::now(),
                updated_at: chrono::Utc::now(),
                entity_type: "TestEntity".to_string(),
            },
        }];
        let mut conn = db.lock()?;
        flush_to_sqlite(&ops, &mut conn)?;
        Ok(())
    }
    // Exercises the synchronous flush path: buffer two operations, flush,
    // and verify the metrics counter and the drained buffer.
    #[test]
    fn test_flush_system() -> Result<()> {
        let db = PersistenceDb::in_memory()?;
        let mut write_buffer = WriteBuffer::new(1000);
        let mut metrics = PersistenceMetrics::default();
        // Add some operations
        let entity_id = uuid::Uuid::new_v4();
        // First add the entity
        write_buffer
            .add(PersistenceOp::UpsertEntity {
                id: entity_id,
                data: EntityData {
                    id: entity_id,
                    created_at: chrono::Utc::now(),
                    updated_at: chrono::Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            })
            .unwrap();
        // Then add a component
        write_buffer
            .add(PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Transform".to_string(),
                data: vec![1, 2, 3],
            })
            .unwrap();
        // Take operations and flush synchronously (testing the flush logic)
        let ops = write_buffer.take_operations();
        perform_flush_sync(&ops, &db, &mut metrics)?;
        assert_eq!(metrics.flush_count, 1);
        assert_eq!(write_buffer.len(), 0);
        Ok(())
    }
}

View File

@@ -0,0 +1,886 @@
//! Core types for the persistence layer
use std::{
collections::{
HashMap,
HashSet,
},
time::Instant,
};
use bevy::prelude::Resource;
use chrono::{
DateTime,
Utc,
};
use serde::{
Deserialize,
Serialize,
};
/// Maximum size for a single component in bytes (10MB)
/// Components larger than this may indicate serialization issues or unbounded
/// data growth; `WriteBuffer::add_with_priority` rejects them with
/// `ComponentTooLarge`.
const MAX_COMPONENT_SIZE_BYTES: usize = 10 * 1024 * 1024;
/// Critical flush deadline in milliseconds (1 second for tier-1 operations)
/// Enforced by `WriteBuffer::should_flush`.
const CRITICAL_FLUSH_DEADLINE_MS: u64 = 1000;
/// Unique identifier for entities that can be synced across nodes
pub type EntityId = uuid::Uuid;
/// Node identifier for CRDT operations
pub type NodeId = uuid::Uuid;
/// Priority level for persistence operations
///
/// Determines how quickly an operation should be flushed to disk:
/// - **Normal**: Regular batched flushing (5-60s intervals based on battery)
/// - **Critical**: Flush within 1 second (tier-1 operations like user actions,
///   CRDT ops)
/// - **Immediate**: Flush immediately (shutdown, background suspension)
///
/// Variant order matters: the derived `Ord` follows declaration order
/// (Normal < Critical < Immediate), which `WriteBuffer` relies on when
/// comparing priorities.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum FlushPriority {
    /// Normal priority - regular batched flushing
    Normal,
    /// Critical priority - flush within 1 second
    Critical,
    /// Immediate priority - flush right now
    Immediate,
}
/// Resource to track entities with uncommitted changes
///
/// All three maps are cleared together once the dirty state has been moved
/// into the write buffer (see [`DirtyEntities::clear`]).
#[derive(Debug, Default)]
pub struct DirtyEntities {
    /// Set of entity IDs with changes not yet in write buffer
    pub entities: HashSet<EntityId>,
    /// Map of entity ID to set of dirty component type names
    pub components: HashMap<EntityId, HashSet<String>>,
    /// Track when each entity was last modified (for prioritization)
    pub last_modified: HashMap<EntityId, Instant>,
}
impl DirtyEntities {
    /// Create an empty tracker.
    pub fn new() -> Self {
        Self::default()
    }

    /// Record that `component_type` on `entity_id` has uncommitted changes.
    pub fn mark_dirty(&mut self, entity_id: EntityId, component_type: impl Into<String>) {
        self.entities.insert(entity_id);
        let dirty_components = self.components.entry(entity_id).or_default();
        dirty_components.insert(component_type.into());
        self.last_modified.insert(entity_id, Instant::now());
    }

    /// Forget all dirty state (called after flushing to the write buffer).
    pub fn clear(&mut self) {
        self.entities.clear();
        self.components.clear();
        self.last_modified.clear();
    }

    /// Whether `entity_id` currently has uncommitted changes.
    pub fn is_dirty(&self, entity_id: &EntityId) -> bool {
        self.entities.contains(entity_id)
    }

    /// Number of entities with uncommitted changes.
    pub fn count(&self) -> usize {
        self.entities.len()
    }
}
/// Operations that can be persisted to the database
///
/// `UpsertEntity`/`UpsertComponent` are coalesced by the write buffer so only
/// the latest state per key is flushed; the remaining variants are appended
/// as-is.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PersistenceOp {
    /// Insert or update an entity's existence
    UpsertEntity { id: EntityId, data: EntityData },
    /// Insert or update a component on an entity
    UpsertComponent {
        entity_id: EntityId,
        // Type path used as the component's key in the database.
        component_type: String,
        // Serialized component bytes (reflection-based serialization).
        data: Vec<u8>,
    },
    /// Log an operation for CRDT sync
    LogOperation {
        node_id: NodeId,
        // Per-node sequence number — presumably assigned monotonically by
        // the CRDT layer; confirm against the op-log writer.
        sequence: u64,
        operation: Vec<u8>,
    },
    /// Update vector clock for causality tracking
    UpdateVectorClock { node_id: NodeId, counter: u64 },
    /// Delete an entity
    DeleteEntity { id: EntityId },
    /// Delete a component from an entity
    DeleteComponent {
        entity_id: EntityId,
        component_type: String,
    },
}
impl PersistenceOp {
    /// Get the default priority for this operation type
    ///
    /// CRDT bookkeeping (`LogOperation`, `UpdateVectorClock`) is tier-1 and
    /// must reach disk within a second to preserve causality across nodes;
    /// everything else rides the regular batched flush.
    pub fn default_priority(&self) -> FlushPriority {
        let is_crdt_op = matches!(
            self,
            PersistenceOp::LogOperation { .. } | PersistenceOp::UpdateVectorClock { .. }
        );
        if is_crdt_op {
            FlushPriority::Critical
        } else {
            FlushPriority::Normal
        }
    }
}
/// Metadata about an entity
///
/// Row payload stored alongside the entity in the database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityData {
    // Stable network id; matches the `Persisted` marker's id.
    pub id: EntityId,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    // Free-form type label, e.g. "TestEntity" in the tests below.
    pub entity_type: String,
}
/// Write buffer for batching persistence operations
///
/// Operations accumulate here and are drained by the flush systems. Upserts
/// for the same entity/component are coalesced in place via the two index
/// maps, and priority tracking determines how urgently the buffer must be
/// flushed (see `should_flush`).
#[derive(Debug)]
pub struct WriteBuffer {
    /// Pending operations not yet committed to SQLite
    pub pending_operations: Vec<PersistenceOp>,
    /// Index mapping (entity_id, component_type) to position in
    /// pending_operations Enables O(1) deduplication for UpsertComponent
    /// operations
    component_index: std::collections::HashMap<(EntityId, String), usize>,
    /// Index mapping entity_id to position in pending_operations
    /// Enables O(1) deduplication for UpsertEntity operations
    entity_index: std::collections::HashMap<EntityId, usize>,
    /// When the buffer was last flushed
    pub last_flush: Instant,
    /// Maximum number of operations before forcing a flush
    pub max_operations: usize,
    /// Highest priority operation currently in the buffer
    pub highest_priority: FlushPriority,
    /// When the first critical operation was added (for deadline tracking)
    pub first_critical_time: Option<Instant>,
}
impl WriteBuffer {
    /// Create an empty buffer that forces a flush once `max_operations`
    /// operations are pending.
    pub fn new(max_operations: usize) -> Self {
        Self {
            pending_operations: Vec::new(),
            component_index: std::collections::HashMap::new(),
            entity_index: std::collections::HashMap::new(),
            last_flush: Instant::now(),
            max_operations,
            highest_priority: FlushPriority::Normal,
            first_critical_time: None,
        }
    }
    /// Add an operation to the write buffer with normal priority
    ///
    /// This is a convenience method that calls `add_with_priority` with
    /// `FlushPriority::Normal`.
    ///
    /// # Errors
    /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
    /// MAX_COMPONENT_SIZE_BYTES (10MB)
    pub fn add(&mut self, op: PersistenceOp) -> Result<(), crate::persistence::PersistenceError> {
        self.add_with_priority(op, FlushPriority::Normal)
    }
    /// Add an operation using its default priority
    ///
    /// Uses `PersistenceOp::default_priority()` to determine priority
    /// automatically. CRDT operations will be added as Critical, others as
    /// Normal.
    ///
    /// # Errors
    /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
    /// MAX_COMPONENT_SIZE_BYTES (10MB)
    pub fn add_with_default_priority(
        &mut self,
        op: PersistenceOp,
    ) -> Result<(), crate::persistence::PersistenceError> {
        let priority = op.default_priority();
        self.add_with_priority(op, priority)
    }
    /// Add an operation to the write buffer with the specified priority
    ///
    /// If an operation for the same entity+component already exists,
    /// it will be replaced (keeping only the latest state). The priority
    /// is tracked separately to determine flush urgency.
    ///
    /// # Errors
    /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
    /// MAX_COMPONENT_SIZE_BYTES (10MB)
    pub fn add_with_priority(
        &mut self,
        op: PersistenceOp,
        priority: FlushPriority,
    ) -> Result<(), crate::persistence::PersistenceError> {
        // Validate component size to prevent unbounded memory growth
        match &op {
            | PersistenceOp::UpsertComponent {
                data,
                component_type,
                ..
            } => {
                if data.len() > MAX_COMPONENT_SIZE_BYTES {
                    return Err(crate::persistence::PersistenceError::ComponentTooLarge {
                        component_type: component_type.clone(),
                        size_bytes: data.len(),
                        max_bytes: MAX_COMPONENT_SIZE_BYTES,
                    });
                }
            },
            | PersistenceOp::LogOperation { operation, .. } => {
                if operation.len() > MAX_COMPONENT_SIZE_BYTES {
                    return Err(crate::persistence::PersistenceError::ComponentTooLarge {
                        component_type: "Operation".to_string(),
                        size_bytes: operation.len(),
                        max_bytes: MAX_COMPONENT_SIZE_BYTES,
                    });
                }
            },
            | _ => {},
        }
        // BUG FIX: track priority BEFORE coalescing. The coalescing branches
        // below return early when replacing an existing operation; priority
        // tracking previously ran after them, so re-adding an already
        // buffered entity/component with Critical or Immediate priority
        // never raised `highest_priority` nor armed the critical deadline.
        if priority > self.highest_priority {
            self.highest_priority = priority;
        }
        // Track when first critical operation was added (for deadline enforcement)
        if priority >= FlushPriority::Critical && self.first_critical_time.is_none() {
            self.first_critical_time = Some(Instant::now());
        }
        match &op {
            | PersistenceOp::UpsertComponent {
                entity_id,
                component_type,
                ..
            } => {
                // O(1) lookup: check if we already have this component
                let key = (*entity_id, component_type.clone());
                if let Some(&old_pos) = self.component_index.get(&key) {
                    // Replace existing operation in-place
                    self.pending_operations[old_pos] = op;
                    return Ok(());
                }
                // New operation: add to index
                let new_pos = self.pending_operations.len();
                self.component_index.insert(key, new_pos);
            },
            | PersistenceOp::UpsertEntity { id, .. } => {
                // O(1) lookup: check if we already have this entity
                if let Some(&old_pos) = self.entity_index.get(id) {
                    // Replace existing operation in-place
                    self.pending_operations[old_pos] = op;
                    return Ok(());
                }
                // New operation: add to index
                let new_pos = self.pending_operations.len();
                self.entity_index.insert(*id, new_pos);
            },
            | _ => {
                // Other operations don't need coalescing
            },
        }
        self.pending_operations.push(op);
        Ok(())
    }
    /// Take all pending operations and return them for flushing
    ///
    /// This resets the priority tracking state and clears the deduplication
    /// indices.
    pub fn take_operations(&mut self) -> Vec<PersistenceOp> {
        // Reset priority tracking when operations are taken
        self.highest_priority = FlushPriority::Normal;
        self.first_critical_time = None;
        // Clear deduplication indices
        self.component_index.clear();
        self.entity_index.clear();
        std::mem::take(&mut self.pending_operations)
    }
    /// Check if buffer should be flushed
    ///
    /// Returns true if any of these conditions are met:
    /// - Buffer is at capacity (max_operations reached)
    /// - Regular flush interval has elapsed (for normal priority)
    /// - Critical operation deadline exceeded (1 second for critical ops)
    /// - Immediate priority operation exists
    pub fn should_flush(&self, flush_interval: std::time::Duration) -> bool {
        // Immediate priority always flushes
        if self.highest_priority == FlushPriority::Immediate {
            return true;
        }
        // Critical priority flushes after 1 second deadline
        if self.highest_priority == FlushPriority::Critical {
            if let Some(critical_time) = self.first_critical_time {
                if critical_time.elapsed().as_millis() >= CRITICAL_FLUSH_DEADLINE_MS as u128 {
                    return true;
                }
            }
        }
        // Normal flushing conditions
        self.pending_operations.len() >= self.max_operations ||
            self.last_flush.elapsed() >= flush_interval
    }
    /// Get the number of pending operations
    pub fn len(&self) -> usize {
        self.pending_operations.len()
    }
    /// Check if the buffer is empty
    pub fn is_empty(&self) -> bool {
        self.pending_operations.is_empty()
    }
}
/// Battery status for adaptive flushing
///
/// Fed from the platform via `update_from_ios`; consumed by
/// `PersistenceConfig::get_flush_interval` to stretch flush intervals on
/// low battery.
#[derive(Debug, Clone, Copy, Resource)]
pub struct BatteryStatus {
    /// Battery level from 0.0 to 1.0 (clamped on update)
    pub level: f32,
    /// Whether the device is currently charging
    pub is_charging: bool,
    /// Whether low power mode is enabled (iOS)
    pub is_low_power_mode: bool,
}
impl Default for BatteryStatus {
    /// Assume a full, unplugged battery with low power mode off until the
    /// platform reports real values.
    fn default() -> Self {
        Self {
            is_charging: false,
            is_low_power_mode: false,
            level: 1.0,
        }
    }
}
impl BatteryStatus {
    /// Update battery status from iOS UIDevice.batteryLevel
    ///
    /// # iOS Integration Example
    ///
    /// ```swift
    /// // In your iOS app code:
    /// UIDevice.current.isBatteryMonitoringEnabled = true
    /// let batteryLevel = UIDevice.current.batteryLevel // Returns 0.0 to 1.0
    /// let isCharging = UIDevice.current.batteryState == .charging ||
    /// UIDevice.current.batteryState == .full
    /// let isLowPowerMode = ProcessInfo.processInfo.isLowPowerModeEnabled
    ///
    /// // Update Bevy resource (this is pseudocode - actual implementation depends on your bridge)
    /// battery_status.update_from_ios(batteryLevel, isCharging, isLowPowerMode);
    /// ```
    pub fn update_from_ios(&mut self, level: f32, is_charging: bool, is_low_power_mode: bool) {
        self.is_charging = is_charging;
        self.is_low_power_mode = is_low_power_mode;
        // Clamp defends against out-of-range platform values.
        self.level = level.clamp(0.0, 1.0);
    }

    /// Check if the device is in a battery-critical state
    ///
    /// Returns true if battery is low (<20%) and not charging, or low power
    /// mode is enabled.
    pub fn is_battery_critical(&self) -> bool {
        if self.is_low_power_mode {
            return true;
        }
        self.level < 0.2 && !self.is_charging
    }
}
/// Session state tracking for crash detection
///
/// A new row is written by `startup_system`; `clean_shutdown` starts false
/// and is presumably flipped by `mark_clean_shutdown` on orderly exit —
/// confirm against the schema helpers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionState {
    // Random UUID string identifying this run.
    pub session_id: String,
    pub started_at: DateTime<Utc>,
    // False until the session exits through the shutdown path.
    pub clean_shutdown: bool,
}
impl SessionState {
    /// Begin a new session: random id, stamped now, and `clean_shutdown`
    /// unset (it only becomes true via the orderly shutdown path).
    pub fn new() -> Self {
        let session_id = uuid::Uuid::new_v4().to_string();
        Self {
            session_id,
            started_at: Utc::now(),
            clean_shutdown: false,
        }
    }
}
impl Default for SessionState {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    // Unit tests covering dirty-entity tracking, write-buffer coalescing,
    // flush priorities, and component/operation size limits.
    use super::*;
    // Marking an entity dirty is observable via `is_dirty`/`count`;
    // `clear` resets both.
    #[test]
    fn test_dirty_entities_tracking() {
        let mut dirty = DirtyEntities::new();
        let entity_id = EntityId::new_v4();
        dirty.mark_dirty(entity_id, "Transform");
        assert!(dirty.is_dirty(&entity_id));
        assert_eq!(dirty.count(), 1);
        dirty.clear();
        assert!(!dirty.is_dirty(&entity_id));
        assert_eq!(dirty.count(), 0);
    }
    // Two upserts for the same (entity, component type) coalesce into a
    // single buffered op holding the latest data.
    #[test]
    fn test_write_buffer_coalescing() -> Result<(), crate::persistence::PersistenceError> {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add first version
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id,
            component_type: "Transform".to_string(),
            data: vec![1, 2, 3],
        })?;
        assert_eq!(buffer.len(), 1);
        // Add second version (should replace first)
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id,
            component_type: "Transform".to_string(),
            data: vec![4, 5, 6],
        })?;
        assert_eq!(buffer.len(), 1);
        // Verify only latest version exists
        let ops = buffer.take_operations();
        assert_eq!(ops.len(), 1);
        if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
            assert_eq!(data, &vec![4, 5, 6]);
        } else {
            panic!("Expected UpsertComponent");
        }
        Ok(())
    }
    // Different component types on the same entity are kept as separate ops.
    #[test]
    fn test_write_buffer_different_components() {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add Transform
        buffer
            .add(PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Transform".to_string(),
                data: vec![1, 2, 3],
            })
            .expect("Should successfully add Transform");
        // Add Velocity (different component, should not coalesce)
        buffer
            .add(PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Velocity".to_string(),
                data: vec![4, 5, 6],
            })
            .expect("Should successfully add Velocity");
        assert_eq!(buffer.len(), 2);
    }
    // An Immediate-priority op makes `should_flush` true regardless of the
    // configured flush interval.
    #[test]
    fn test_flush_priority_immediate() {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add operation with immediate priority
        buffer
            .add_with_priority(
                PersistenceOp::UpsertEntity {
                    id: entity_id,
                    data: EntityData {
                        id: entity_id,
                        created_at: chrono::Utc::now(),
                        updated_at: chrono::Utc::now(),
                        entity_type: "TestEntity".to_string(),
                    },
                },
                FlushPriority::Immediate,
            )
            .expect("Should successfully add entity with immediate priority");
        // Should flush immediately regardless of interval
        assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
        assert_eq!(buffer.highest_priority, FlushPriority::Immediate);
    }
    // Critical ops defer flushing until the CRITICAL_FLUSH_DEADLINE_MS
    // deadline has elapsed (simulated here by backdating first_critical_time).
    #[test]
    fn test_flush_priority_critical_deadline() -> Result<(), crate::persistence::PersistenceError> {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add operation with critical priority
        buffer.add_with_priority(
            PersistenceOp::UpsertEntity {
                id: entity_id,
                data: EntityData {
                    id: entity_id,
                    created_at: chrono::Utc::now(),
                    updated_at: chrono::Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            },
            FlushPriority::Critical,
        )?;
        assert_eq!(buffer.highest_priority, FlushPriority::Critical);
        assert!(buffer.first_critical_time.is_some());
        // Should not flush immediately
        assert!(!buffer.should_flush(std::time::Duration::from_secs(100)));
        // Simulate deadline passing by manually setting the time
        buffer.first_critical_time = Some(
            Instant::now() - std::time::Duration::from_millis(CRITICAL_FLUSH_DEADLINE_MS + 100),
        );
        // Now should flush due to deadline
        assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
        Ok(())
    }
    // Normal-priority ops only flush once the flush interval has elapsed
    // (simulated by backdating last_flush).
    #[test]
    fn test_flush_priority_normal() -> Result<(), crate::persistence::PersistenceError> {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add normal priority operation
        buffer.add(PersistenceOp::UpsertEntity {
            id: entity_id,
            data: EntityData {
                id: entity_id,
                created_at: chrono::Utc::now(),
                updated_at: chrono::Utc::now(),
                entity_type: "TestEntity".to_string(),
            },
        })?;
        assert_eq!(buffer.highest_priority, FlushPriority::Normal);
        assert!(buffer.first_critical_time.is_none());
        // Should not flush before interval
        assert!(!buffer.should_flush(std::time::Duration::from_secs(100)));
        // Set last flush to past
        buffer.last_flush = Instant::now() - std::time::Duration::from_secs(200);
        // Now should flush
        assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
        Ok(())
    }
    // take_operations drains the buffer and resets priority tracking
    // (highest_priority back to Normal, critical timestamp cleared).
    #[test]
    fn test_priority_reset_on_take() -> Result<(), crate::persistence::PersistenceError> {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add critical operation
        buffer.add_with_priority(
            PersistenceOp::UpsertEntity {
                id: entity_id,
                data: EntityData {
                    id: entity_id,
                    created_at: chrono::Utc::now(),
                    updated_at: chrono::Utc::now(),
                    entity_type: "TestEntity".to_string(),
                },
            },
            FlushPriority::Critical,
        )?;
        assert_eq!(buffer.highest_priority, FlushPriority::Critical);
        assert!(buffer.first_critical_time.is_some());
        // Take operations
        let ops = buffer.take_operations();
        assert_eq!(ops.len(), 1);
        // Priority should be reset
        assert_eq!(buffer.highest_priority, FlushPriority::Normal);
        assert!(buffer.first_critical_time.is_none());
        Ok(())
    }
    // CRDT ops (operation log, vector clock) default to Critical priority;
    // plain entity upserts default to Normal.
    #[test]
    fn test_default_priority_for_crdt_ops() {
        let node_id = NodeId::new_v4();
        let log_op = PersistenceOp::LogOperation {
            node_id,
            sequence: 1,
            operation: vec![1, 2, 3],
        };
        let vector_clock_op = PersistenceOp::UpdateVectorClock {
            node_id,
            counter: 42,
        };
        let entity_op = PersistenceOp::UpsertEntity {
            id: EntityId::new_v4(),
            data: EntityData {
                id: EntityId::new_v4(),
                created_at: chrono::Utc::now(),
                updated_at: chrono::Utc::now(),
                entity_type: "TestEntity".to_string(),
            },
        };
        // CRDT operations should have Critical priority
        assert_eq!(log_op.default_priority(), FlushPriority::Critical);
        assert_eq!(vector_clock_op.default_priority(), FlushPriority::Critical);
        // Other operations should have Normal priority
        assert_eq!(entity_op.default_priority(), FlushPriority::Normal);
    }
    // The coalescing index stays consistent across repeated upserts and
    // remains usable after take_operations clears it.
    #[test]
    fn test_index_consistency_after_operations() -> Result<(), crate::persistence::PersistenceError>
    {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Add component multiple times - should only keep latest
        for i in 0..10 {
            buffer.add(PersistenceOp::UpsertComponent {
                entity_id,
                component_type: "Transform".to_string(),
                data: vec![i],
            })?;
        }
        // Buffer should only have 1 operation (latest)
        assert_eq!(buffer.len(), 1);
        // Verify it's the latest data
        let ops = buffer.take_operations();
        assert_eq!(ops.len(), 1);
        if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
            assert_eq!(data, &vec![9]);
        } else {
            panic!("Expected UpsertComponent");
        }
        // After take, indices should be cleared and we can reuse
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id,
            component_type: "Transform".to_string(),
            data: vec![100],
        })?;
        assert_eq!(buffer.len(), 1);
        Ok(())
    }
    // The same component type on different entities must NOT coalesce;
    // updating one entity replaces only that entity's op.
    #[test]
    fn test_index_handles_multiple_entities() -> Result<(), crate::persistence::PersistenceError> {
        let mut buffer = WriteBuffer::new(100);
        let entity1 = EntityId::new_v4();
        let entity2 = EntityId::new_v4();
        // Add same component type for different entities
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id: entity1,
            component_type: "Transform".to_string(),
            data: vec![1],
        })?;
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id: entity2,
            component_type: "Transform".to_string(),
            data: vec![2],
        })?;
        // Should have 2 operations (different entities)
        assert_eq!(buffer.len(), 2);
        // Update first entity
        buffer.add(PersistenceOp::UpsertComponent {
            entity_id: entity1,
            component_type: "Transform".to_string(),
            data: vec![3],
        })?;
        // Still 2 operations (first was replaced in-place)
        assert_eq!(buffer.len(), 2);
        Ok(())
    }
    // add_with_default_priority picks up the op's own default priority
    // (Critical for a CRDT log operation).
    #[test]
    fn test_add_with_default_priority() {
        let mut buffer = WriteBuffer::new(100);
        let node_id = NodeId::new_v4();
        // Add CRDT operation using default priority
        buffer
            .add_with_default_priority(PersistenceOp::LogOperation {
                node_id,
                sequence: 1,
                operation: vec![1, 2, 3],
            })
            .unwrap();
        // Should be tracked as Critical
        assert_eq!(buffer.highest_priority, FlushPriority::Critical);
        assert!(buffer.first_critical_time.is_some());
    }
    // Components above MAX_COMPONENT_SIZE_BYTES are rejected with a
    // ComponentTooLarge error and the buffer is left untouched.
    #[test]
    fn test_oversized_component_returns_error() {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Create 11MB component (exceeds 10MB limit)
        let oversized_data = vec![0u8; 11 * 1024 * 1024];
        let result = buffer.add(PersistenceOp::UpsertComponent {
            entity_id,
            component_type: "HugeComponent".to_string(),
            data: oversized_data,
        });
        // Should return error, not panic
        assert!(result.is_err());
        match result {
            | Err(crate::persistence::PersistenceError::ComponentTooLarge {
                component_type,
                size_bytes,
                max_bytes,
            }) => {
                assert_eq!(component_type, "HugeComponent");
                assert_eq!(size_bytes, 11 * 1024 * 1024);
                assert_eq!(max_bytes, MAX_COMPONENT_SIZE_BYTES);
            },
            | _ => panic!("Expected ComponentTooLarge error"),
        }
        // Buffer should be unchanged
        assert_eq!(buffer.len(), 0);
    }
    // A component exactly at the 10MB limit is accepted (boundary case).
    #[test]
    fn test_max_size_component_succeeds() {
        let mut buffer = WriteBuffer::new(100);
        let entity_id = EntityId::new_v4();
        // Create exactly 10MB component (at limit)
        let max_data = vec![0u8; 10 * 1024 * 1024];
        let result = buffer.add(PersistenceOp::UpsertComponent {
            entity_id,
            component_type: "MaxComponent".to_string(),
            data: max_data,
        });
        assert!(result.is_ok());
        assert_eq!(buffer.len(), 1);
    }
    // Oversized CRDT operations reuse ComponentTooLarge with the
    // component_type reported as "Operation".
    #[test]
    fn test_oversized_operation_returns_error() {
        let mut buffer = WriteBuffer::new(100);
        let oversized_op = vec![0u8; 11 * 1024 * 1024];
        let result = buffer.add(PersistenceOp::LogOperation {
            node_id: uuid::Uuid::new_v4(),
            sequence: 1,
            operation: oversized_op,
        });
        assert!(result.is_err());
        match result {
            | Err(crate::persistence::PersistenceError::ComponentTooLarge {
                component_type,
                ..
            }) => {
                assert_eq!(component_type, "Operation");
            },
            | _ => panic!("Expected ComponentTooLarge error for Operation"),
        }
    }
    // Property-style sweep: add() never panics for any payload size, and
    // the Ok/Err outcome exactly tracks the MAX_COMPONENT_SIZE_BYTES limit.
    // (Each assert message describes what SHOULD have happened when the
    // opposite outcome was observed.)
    #[test]
    fn test_write_buffer_never_panics_property() {
        // Property test: WriteBuffer should never panic on any size
        let sizes = [
            0,
            1000,
            1_000_000,
            5_000_000,
            10_000_000, // Exactly at limit
            10_000_001, // Just over limit
            11_000_000,
            100_000_000,
        ];
        for size in sizes {
            let mut buffer = WriteBuffer::new(100);
            let data = vec![0u8; size];
            let result = buffer.add(PersistenceOp::UpsertComponent {
                entity_id: uuid::Uuid::new_v4(),
                component_type: "TestComponent".to_string(),
                data,
            });
            // Should never panic, always return Ok or Err
            match result {
                | Ok(_) => assert!(
                    size <= MAX_COMPONENT_SIZE_BYTES,
                    "Size {} should have failed",
                    size
                ),
                | Err(_) => assert!(
                    size > MAX_COMPONENT_SIZE_BYTES,
                    "Size {} should have succeeded",
                    size
                ),
            }
        }
    }
}