checkpoint

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-04 19:49:48 +00:00
parent a76306342d
commit 93ab598db0
10 changed files with 34049 additions and 506 deletions

5
Cargo.lock generated
View File

@@ -7414,10 +7414,15 @@ dependencies = [
name = "sync-macros"
version = "0.1.0"
dependencies = [
"anyhow",
"bevy",
"bincode",
"lib",
"proc-macro2",
"quote",
"serde",
"syn",
"tracing",
]
[[package]]

33192
crates/lib/lonni_messages.csv Normal file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,245 @@
#!/usr/bin/env -S cargo +nightly -Zscript
---
[dependencies]
rusqlite = { version = "0.37.0", features = ["bundled"] }
csv = "1.3"
chrono = "0.4"
plist = "1.8"
ns-keyed-archive = "0.1.4"
anyhow = "1.0"
---
use rusqlite::{Connection, OpenFlags};
use std::fs::File;
use csv::Writer;
use chrono::{DateTime, Utc};
use anyhow::Result;
use ns_keyed_archive::decode::from_bytes as decode_keyed_archive;
const PHONE_NUMBER: &str = "+31639132913";
const COCOA_EPOCH_OFFSET: i64 = 978307200;

/// Convert an Apple "Cocoa" timestamp from `chat.db` into an RFC 3339 string.
///
/// Modern macOS stores `message.date` as nanoseconds since 2001-01-01 UTC;
/// databases written by pre-High Sierra releases stored plain seconds since
/// 2001. A nanosecond value for any date after 2001 is >= ~1e12, so anything
/// smaller is treated as seconds. Returns an empty string for `0` (unset) or
/// for values chrono cannot represent.
fn cocoa_timestamp_to_datetime(timestamp: i64) -> String {
    if timestamp == 0 {
        return String::new();
    }
    // Magnitude heuristic: values below 10^12 cannot be plausible nanosecond
    // timestamps, so interpret them as legacy whole seconds since 2001.
    let (seconds_since_2001, nanoseconds) = if timestamp.abs() < 1_000_000_000_000 {
        (timestamp, 0u32)
    } else {
        (timestamp / 1_000_000_000, (timestamp % 1_000_000_000) as u32)
    };
    let unix_timestamp = COCOA_EPOCH_OFFSET + seconds_since_2001;
    DateTime::from_timestamp(unix_timestamp, nanoseconds)
        .map(|dt: DateTime<Utc>| dt.to_rfc3339())
        .unwrap_or_default()
}
/// Best-effort extraction of the message text from a `message.attributedBody`
/// blob (an NSKeyedArchiver archive).
///
/// Tries three strategies in order:
/// 1. decode via the `ns-keyed-archive` crate and pull out the string value;
/// 2. parse as a generic plist and scan `$objects` for the first string that
///    is not an archiver artifact;
/// 3. scan raw bytes for the longest printable run (`extract_text_fallback`).
///
/// Returns an empty string when nothing usable is found.
fn extract_text_from_attributed_body(attributed_body: &[u8]) -> String {
    if attributed_body.is_empty() {
        return String::new();
    }
    // Preferred path: a successful keyed-archive decode is authoritative,
    // even when it yields no string (we return "" rather than falling back).
    if let Ok(decoded) = decode_keyed_archive(attributed_body) {
        return extract_string_from_value(&decoded).unwrap_or_default();
    }
    // Second attempt: generic plist parse; walk $objects for a real string.
    if let Ok(value) = plist::from_bytes::<plist::Value>(attributed_body) {
        let objects = value
            .as_dictionary()
            .and_then(|dict| dict.get("$objects"))
            .and_then(|v| v.as_array());
        if let Some(objects) = objects {
            for obj in objects {
                if let Some(s) = obj.as_string() {
                    let artifact = s.is_empty()
                        || s == "$null"
                        || s.starts_with("NS")
                        || s.starts_with("__k");
                    if !artifact {
                        return s.to_string();
                    }
                }
            }
        }
    }
    // Last resort: simple string extraction
    extract_text_fallback(attributed_body)
}
fn extract_string_from_value(value: &plist::Value) -> Option<String> {
match value {
plist::Value::String(s) => Some(s.clone()),
plist::Value::Dictionary(dict) => {
// Look for common NSAttributedString keys
for key in &["NSString", "NS.string", "string"] {
if let Some(val) = dict.get(*key) {
if let Some(s) = extract_string_from_value(val) {
return Some(s);
}
}
}
None
}
plist::Value::Array(arr) => {
// Find first non-empty string in array
for item in arr {
if let Some(s) = extract_string_from_value(item) {
if !s.is_empty() && !s.starts_with("NS") && !s.starts_with("__k") {
return Some(s);
}
}
}
None
}
_ => None,
}
}
/// Simple fallback: scan the raw bytes for the longest run of printable ASCII
/// that does not look like NSKeyedArchiver metadata, then strip leading `+`
/// artifacts and surrounding whitespace.
///
/// Fix over the previous version: the run that terminates at end-of-input is
/// now passed through the same artifact filter as interior runs, so trailing
/// `"streamtyped"` / `"NS…"` / `"__k…"` tokens can no longer be returned as
/// message text. (`"NSDictionary"` is covered by the `NS` prefix check.)
fn extract_text_fallback(attributed_body: &[u8]) -> String {
    // Archiver boilerplate that must never win as "message text".
    fn is_artifact(s: &str) -> bool {
        s.starts_with("NS") || s.starts_with("__k") || s == "streamtyped"
    }

    let mut best_string = String::new();
    let mut current_str = String::new();
    for &byte in attributed_body {
        if (32..127).contains(&byte) {
            current_str.push(byte as char);
        } else {
            if current_str.len() > best_string.len() && !is_artifact(&current_str) {
                best_string = std::mem::take(&mut current_str);
            } else {
                current_str.clear();
            }
        }
    }
    // Final run: apply the same length-and-artifact criteria as interior runs.
    if current_str.len() > best_string.len() && !is_artifact(&current_str) {
        best_string = current_str;
    }
    // Clean up common artifacts: leading '+' runs on strings long enough to
    // plausibly be text, then surrounding whitespace.
    if best_string.len() > 2 {
        best_string = best_string.trim_start_matches('+').to_string();
    }
    best_string.trim().to_string()
}
/// Export the full message history with `PHONE_NUMBER` from the local macOS
/// Messages database (`~/Library/Messages/chat.db`) into `lonni_messages.csv`.
///
/// The database is opened read-only; one CSV row is written per message with
/// Cocoa timestamps converted to RFC 3339. Fails if `HOME` is unset, the
/// database is unreadable, or the CSV cannot be written.
fn main() -> Result<()> {
    let home = std::env::var("HOME")?;
    let chat_db_path = format!("{}/Library/Messages/chat.db", home);
    // Read-only open: never mutate the live Messages database.
    let conn = Connection::open_with_flags(&chat_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)?;
    // Match by direct handle OR chat identifier so both 1:1 messages and
    // chat-level rows keyed by the phone number are included.
    let mut stmt = conn.prepare(
        "SELECT
            m.ROWID,
            m.text,
            m.attributedBody,
            m.date,
            m.date_read,
            m.date_delivered,
            m.is_from_me,
            m.is_read,
            COALESCE(h.id, 'unknown') as handle_id,
            c.chat_identifier,
            m.service
        FROM message m
        LEFT JOIN handle h ON m.handle_id = h.ROWID
        LEFT JOIN chat_message_join cmj ON m.ROWID = cmj.message_id
        LEFT JOIN chat c ON cmj.chat_id = c.ROWID
        WHERE h.id = ?1 OR c.chat_identifier = ?1
        ORDER BY m.date ASC",
    )?;
    // Tuple order mirrors the SELECT column order above.
    let messages = stmt.query_map([PHONE_NUMBER], |row| {
        Ok((
            row.get::<_, i64>(0)?,             // ROWID
            row.get::<_, Option<String>>(1)?,  // text
            row.get::<_, Option<Vec<u8>>>(2)?, // attributedBody
            row.get::<_, i64>(3)?,             // date
            row.get::<_, Option<i64>>(4)?,     // date_read
            row.get::<_, Option<i64>>(5)?,     // date_delivered
            row.get::<_, i32>(6)?,             // is_from_me
            row.get::<_, i32>(7)?,             // is_read
            row.get::<_, String>(8)?,          // handle_id
            row.get::<_, Option<String>>(9)?,  // chat_identifier
            row.get::<_, Option<String>>(10)?, // service
        ))
    })?;
    let file = File::create("lonni_messages.csv")?;
    let mut wtr = Writer::from_writer(file);
    // Header row; must stay in sync with the write_record call below.
    wtr.write_record(&[
        "id",
        "date",
        "date_read",
        "date_delivered",
        "is_from_me",
        "is_read",
        "handle",
        "chat_identifier",
        "service",
        "text",
    ])?;
    let mut count = 0;
    for message in messages {
        let (
            rowid,
            text,
            attributed_body,
            date,
            date_read,
            date_delivered,
            is_from_me,
            is_read,
            handle_id,
            chat_identifier,
            service,
        ) = message?;
        // Extract text from attributedBody if text field is empty
        // NOTE(review): NULL `text` with a populated attributedBody appears to
        // be how rich/edited messages are stored — confirm against schema docs.
        let message_text = text.unwrap_or_else(|| {
            attributed_body
                .as_ref()
                .map(|body| extract_text_from_attributed_body(body))
                .unwrap_or_default()
        });
        wtr.write_record(&[
            rowid.to_string(),
            cocoa_timestamp_to_datetime(date),
            date_read.map(cocoa_timestamp_to_datetime).unwrap_or_default(),
            date_delivered.map(cocoa_timestamp_to_datetime).unwrap_or_default(),
            is_from_me.to_string(),
            is_read.to_string(),
            handle_id,
            chat_identifier.unwrap_or_default(),
            service.unwrap_or_default(),
            message_text,
        ])?;
        count += 1;
        // Progress heartbeat for large exports.
        if count % 1000 == 0 {
            println!("Exported {} messages...", count);
        }
    }
    wtr.flush()?;
    println!("Successfully exported {} messages to lonni_messages.csv", count);
    Ok(())
}

View File

@@ -46,6 +46,7 @@ mod operations;
mod orset;
mod plugin;
mod rga;
mod sync_component;
mod tombstones;
mod vector_clock;
@@ -67,5 +68,6 @@ pub use operations::*;
pub use orset::*;
pub use plugin::*;
pub use rga::*;
pub use sync_component::*;
pub use tombstones::*;
pub use vector_clock::*;

View File

@@ -0,0 +1,160 @@
//! Sync Component trait and supporting types for RFC 0003
//!
//! This module defines the core trait that all synced components implement,
//! along with the types used for strategy selection and merge decisions.
use bevy::prelude::*;
/// Sync strategy enum - determines how conflicts are resolved
///
/// Selected per component type via `#[sync(version = ..., strategy = "...")]`
/// on the `#[derive(Synced)]` macro, and exposed as `SyncComponent::STRATEGY`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SyncStrategy {
    /// Last-Write-Wins: Newer timestamp wins, node ID tiebreaker for concurrent
    LastWriteWins,
    /// OR-Set: Observed-Remove Set for collections
    Set,
    /// Sequence: RGA (Replicated Growable Array) for ordered lists
    Sequence,
    /// Custom: User-defined conflict resolution
    Custom,
}
/// Result of comparing vector clocks
///
/// Fed into [`SyncComponent::merge`] to tell the strategy whether one side
/// causally dominates the other or the edits were concurrent.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClockComparison {
    /// Remote vector clock is strictly newer
    RemoteNewer,
    /// Local vector clock is strictly newer
    LocalNewer,
    /// Concurrent (neither is newer)
    Concurrent,
}
/// Decision made during component merge operation
///
/// Returned by [`SyncComponent::merge`] so callers can log/diagnose what the
/// strategy actually did.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ComponentMergeDecision {
    /// Kept local value
    KeptLocal,
    /// Took remote value
    TookRemote,
    /// Merged both (for CRDTs)
    Merged,
}
/// Core trait for synced components
///
/// This trait is automatically implemented by the `#[derive(Synced)]` macro.
/// All synced components must implement this trait.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use lib::networking::{SyncComponent, SyncStrategy, ClockComparison, ComponentMergeDecision};
///
/// // Example showing what the trait looks like - normally generated by #[derive(Synced)]
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
/// struct Health(f32);
///
/// // The SyncComponent trait defines these methods that the macro generates
/// // You can serialize and deserialize components for sync
/// ```
pub trait SyncComponent: Component + Reflect + Sized {
    /// Schema version for this component
    ///
    /// NOTE(review): presumably bumped when the serialized layout changes so
    /// peers can detect incompatible schemas — confirm against the sync layer.
    const VERSION: u32;
    /// Sync strategy for conflict resolution
    const STRATEGY: SyncStrategy;
    /// Serialize this component to bytes
    ///
    /// Uses bincode for efficient binary serialization.
    fn serialize_sync(&self) -> anyhow::Result<Vec<u8>>;
    /// Deserialize this component from bytes
    ///
    /// Uses bincode to deserialize from the format created by `serialize_sync`.
    fn deserialize_sync(data: &[u8]) -> anyhow::Result<Self>;
    /// Merge remote state with local state
    ///
    /// The merge logic is strategy-specific:
    /// - **LWW**: Takes newer value based on vector clock, uses tiebreaker for concurrent
    /// - **Set**: Merges both sets (OR-Set semantics)
    /// - **Sequence**: Merges sequences preserving order (RGA semantics)
    /// - **Custom**: Calls user-defined ConflictResolver
    ///
    /// # Arguments
    /// * `remote` - The remote state to merge
    /// * `clock_cmp` - Result of comparing local and remote vector clocks
    ///
    /// # Returns
    /// Decision about what happened during the merge
    fn merge(&mut self, remote: Self, clock_cmp: ClockComparison) -> ComponentMergeDecision;
}
/// Marker component for entities that should be synced
///
/// Add this to any entity with synced components to enable automatic
/// change detection and synchronization. Zero-sized: adding it costs nothing
/// per entity.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use lib::networking::Synced;
/// use sync_macros::Synced as SyncedDerive;
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
/// #[derive(SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Health(f32);
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
/// #[derive(SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Position { x: f32, y: f32 }
///
/// let mut world = World::new();
/// world.spawn((
///     Health(100.0),
///     Position { x: 0.0, y: 0.0 },
///     Synced, // Marker enables sync
/// ));
/// ```
#[derive(Component, Reflect, Default, Clone, Copy)]
#[reflect(Component)]
pub struct Synced;
/// Diagnostic component for debugging sync issues
///
/// Add this to an entity to get detailed diagnostic output about
/// its sync status. Zero-sized marker; a diagnostic system elsewhere queries
/// for it and emits the actual logging.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use lib::networking::DiagnoseSync;
///
/// let mut world = World::new();
/// let entity = world.spawn_empty().id();
/// world.entity_mut(entity).insert(DiagnoseSync);
/// // A diagnostic system will check this entity and log sync status
/// ```
#[derive(Component, Reflect, Default)]
#[reflect(Component)]
pub struct DiagnoseSync;
#[cfg(test)]
mod tests {
    use super::*;

    /// Variants compare equal to themselves and unequal across variants.
    #[test]
    fn strategy_enum_works() {
        let lww = SyncStrategy::LastWriteWins;
        assert_eq!(lww, SyncStrategy::LastWriteWins);
        assert!(lww != SyncStrategy::Set);
    }

    /// Same sanity check for the clock-comparison enum.
    #[test]
    fn clock_comparison_works() {
        let remote_newer = ClockComparison::RemoteNewer;
        assert_eq!(remote_newer, ClockComparison::RemoteNewer);
        assert!(remote_newer != ClockComparison::LocalNewer);
    }
}

View File

@@ -20,11 +20,8 @@ use serde::{
Deserialize,
Serialize,
};
// Re-export the macros
pub use sync_macros::{
Synced,
synced,
};
// Re-export the Synced derive macro
pub use sync_macros::Synced;
pub type NodeId = String;

View File

@@ -1,179 +0,0 @@
use std::sync::Arc;
use anyhow::Result;
use iroh::{
Endpoint,
protocol::{
AcceptError,
ProtocolHandler,
Router,
},
};
use lib::sync::{
SyncMessage,
Syncable,
synced,
};
use tokio::sync::Mutex;
/// Test configuration that can be synced
#[synced]
struct TestConfig {
    value: i32,
    name: String,
    // Excluded from sync payloads; identifies the local replica.
    #[sync(skip)]
    node_id: String,
}
/// ALPN identifier for our sync protocol
const SYNC_ALPN: &[u8] = b"/lonni/sync/1";
/// Protocol handler for receiving sync messages
///
/// Holds the node-local config behind an async mutex so the accept handler
/// can apply incoming operations to shared state.
#[derive(Debug, Clone)]
struct SyncProtocol {
    config: Arc<Mutex<TestConfig>>,
}
impl ProtocolHandler for SyncProtocol {
    /// Handle one inbound connection: read a single serialized `SyncMessage`,
    /// apply its operation to the shared config, then close the stream.
    async fn accept(&self, connection: iroh::endpoint::Connection) -> Result<(), AcceptError> {
        println!("Accepting connection from: {}", connection.remote_id());
        // Accept the bidirectional stream
        let (mut send, mut recv) = connection
            .accept_bi()
            .await
            .map_err(AcceptError::from_err)?;
        println!("Stream accepted, reading message...");
        // Read the sync message
        // 1 MiB cap bounds memory use per connection.
        let bytes = recv
            .read_to_end(1024 * 1024)
            .await
            .map_err(AcceptError::from_err)?;
        println!("Received {} bytes", bytes.len());
        // Deserialize and apply
        let msg = SyncMessage::<TestConfigOp>::from_bytes(&bytes).map_err(|e| {
            AcceptError::from_err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))
        })?;
        println!("Applying operation from node: {}", msg.node_id);
        let mut config = self.config.lock().await;
        config.apply_op(&msg.operation);
        println!("Operation applied successfully");
        // Close the stream
        send.finish().map_err(AcceptError::from_err)?;
        Ok(())
    }
}
/// End-to-end check: node1 mutates a synced config, ships the resulting
/// operation to node2 over a QUIC stream, and both replicas converge.
#[tokio::test(flavor = "multi_thread")]
async fn test_sync_between_two_nodes() -> Result<()> {
    println!("\n=== Testing Sync Between Two Nodes ===\n");
    // Create two endpoints
    let node1 = Endpoint::builder().bind().await?;
    let node2 = Endpoint::builder().bind().await?;
    let node1_addr = node1.addr();
    let node2_addr = node2.addr();
    let node1_id = node1_addr.id.to_string();
    let node2_id = node2_addr.id.to_string();
    println!("Node 1: {}", node1_id);
    println!("Node 2: {}", node2_id);
    // Create synced configs on both nodes
    let mut config1 = TestConfig::new(42, "initial".to_string(), node1_id.clone());
    let config2 = TestConfig::new(42, "initial".to_string(), node2_id.clone());
    let config2_shared = Arc::new(Mutex::new(config2));
    println!("\nInitial state:");
    println!(
        " Node 1: value={}, name={}",
        config1.value(),
        config1.name()
    );
    {
        let config2 = config2_shared.lock().await;
        println!(
            " Node 2: value={}, name={}",
            config2.value(),
            config2.name()
        );
    }
    // Set up router on node2 to accept incoming connections
    println!("\nSetting up node2 router...");
    let protocol = SyncProtocol {
        config: config2_shared.clone(),
    };
    let router = Router::builder(node2).accept(SYNC_ALPN, protocol).spawn();
    router.endpoint().online().await;
    println!("✓ Node2 router ready");
    // Node 1 changes the value
    println!("\nNode 1 changing value to 100...");
    let op = config1.set_value(100);
    // Serialize the operation
    let sync_msg = SyncMessage::new(node1_id.clone(), op);
    let bytes = sync_msg.to_bytes()?;
    println!("Serialized to {} bytes", bytes.len());
    // Establish QUIC connection from node1 to node2
    println!("\nEstablishing QUIC connection...");
    let conn = node1.connect(node2_addr.clone(), SYNC_ALPN).await?;
    println!("✓ Connection established");
    // Open a bidirectional stream
    let (mut send, _recv) = conn.open_bi().await?;
    // Send the sync message
    println!("Sending sync message...");
    send.write_all(&bytes).await?;
    send.finish()?;
    println!("✓ Message sent");
    // Wait a bit for the message to be processed
    // NOTE(review): fixed 100 ms sleep is racy on slow machines — an explicit
    // ack or polling loop would make this deterministic.
    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    // Verify both configs have the same value
    println!("\nFinal state:");
    println!(
        " Node 1: value={}, name={}",
        config1.value(),
        config1.name()
    );
    {
        let config2 = config2_shared.lock().await;
        println!(
            " Node 2: value={}, name={}",
            config2.value(),
            config2.name()
        );
        // Both replicas converge on the new value; untouched field unchanged.
        assert_eq!(*config1.value(), 100);
        assert_eq!(*config2.value(), 100);
        assert_eq!(config1.name(), "initial");
        assert_eq!(config2.name(), "initial");
    }
    println!("\n✓ Sync successful!");
    // Cleanup
    router.shutdown().await?;
    node1.close().await;
    Ok(())
}

View File

@@ -13,3 +13,8 @@ proc-macro2 = "1.0"
[dev-dependencies]
lib = { path = "../lib" }
bevy = { workspace = true }
serde = { workspace = true }
bincode = "1.3"
anyhow = { workspace = true }
tracing = { workspace = true }

View File

@@ -1,341 +1,179 @@
use proc_macro::TokenStream;
use quote::{
format_ident,
quote,
};
use quote::quote;
use syn::{
Data,
DeriveInput,
Fields,
ItemStruct,
Type,
parse_macro_input,
parse_macro_input, DeriveInput,
};
/// Attribute macro for transparent CRDT sync
///
/// Transforms your struct to use CRDTs internally while keeping the API simple.
///
/// # Example
/// ```
/// #[synced]
/// struct EmotionGradientConfig {
/// canvas_width: f32, // Becomes SyncedValue<f32> internally
/// canvas_height: f32, // Auto-generates getters/setters
///
/// #[sync(skip)]
/// node_id: String, // Not synced
/// }
///
/// // Use it like a normal struct:
/// let mut config = EmotionGradientConfig::new("node1".into());
/// config.set_canvas_width(1024.0); // Auto-generates sync operation
/// println!("Width: {}", config.canvas_width()); // Transparent access
/// ```
#[proc_macro_attribute]
pub fn synced(_attr: TokenStream, item: TokenStream) -> TokenStream {
let input = parse_macro_input!(item as ItemStruct);
let name = &input.ident;
let vis = &input.vis;
let op_enum_name = format_ident!("{}Op", name);
let fields = match &input.fields {
| Fields::Named(fields) => &fields.named,
| _ => panic!("synced only supports structs with named fields"),
};
let mut internal_fields = Vec::new();
let mut field_getters = Vec::new();
let mut field_setters = Vec::new();
let mut op_variants = Vec::new();
let mut apply_arms = Vec::new();
let mut merge_code = Vec::new();
let mut new_params = Vec::new();
let mut new_init = Vec::new();
for field in fields {
let field_name = field.ident.as_ref().unwrap();
let field_vis = &field.vis;
let field_type = &field.ty;
// Check if field should be skipped
let should_skip = field.attrs.iter().any(|attr| {
attr.path().is_ident("sync") &&
attr.parse_args::<syn::Ident>()
.map(|i| i == "skip")
.unwrap_or(false)
});
if should_skip {
// Keep as-is, no wrapping
internal_fields.push(quote! {
#field_vis #field_name: #field_type
});
new_params.push(quote! { #field_name: #field_type });
new_init.push(quote! { #field_name });
continue;
}
// Wrap in SyncedValue
internal_fields.push(quote! {
#field_name: lib::sync::SyncedValue<#field_type>
});
// Generate getter
field_getters.push(quote! {
#field_vis fn #field_name(&self) -> &#field_type {
self.#field_name.get()
}
});
// Generate setter that returns operation
let setter_name = format_ident!("set_{}", field_name);
let op_variant = format_ident!(
"Set{}",
field_name
.to_string()
.chars()
.enumerate()
.map(|(i, c)| if i == 0 { c.to_ascii_uppercase() } else { c })
.collect::<String>()
);
field_setters.push(quote! {
#field_vis fn #setter_name(&mut self, value: #field_type) -> #op_enum_name {
let op = #op_enum_name::#op_variant {
value: value.clone(),
timestamp: chrono::Utc::now(),
node_id: self.node_id().clone(),
};
self.#field_name.set(value, self.node_id().clone());
op
}
});
// Generate operation variant
op_variants.push(quote! {
#op_variant {
value: #field_type,
timestamp: chrono::DateTime<chrono::Utc>,
node_id: String,
}
});
// Generate apply arm
apply_arms.push(quote! {
#op_enum_name::#op_variant { value, timestamp, node_id } => {
self.#field_name.apply_lww(value.clone(), timestamp.clone(), node_id.clone());
}
});
// Generate merge code
merge_code.push(quote! {
self.#field_name.merge(&other.#field_name);
});
// Add to new() parameters
new_params.push(quote! { #field_name: #field_type });
new_init.push(quote! {
#field_name: lib::sync::SyncedValue::new(#field_name, node_id.clone())
});
}
let expanded = quote! {
/// Sync operations enum
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(tag = "type")]
#vis enum #op_enum_name {
#(#op_variants),*
}
impl #op_enum_name {
pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
Ok(serde_json::to_vec(self)?)
}
pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
Ok(serde_json::from_slice(bytes)?)
}
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#vis struct #name {
#(#internal_fields),*
}
impl #name {
#vis fn new(#(#new_params),*) -> Self {
Self {
#(#new_init),*
}
}
/// Transparent field accessors
#(#field_getters)*
/// Field setters that generate sync operations
#(#field_setters)*
/// Apply a sync operation from another node
#vis fn apply_op(&mut self, op: &#op_enum_name) {
match op {
#(#apply_arms),*
}
}
/// Merge state from another instance
#vis fn merge(&mut self, other: &Self) {
#(#merge_code)*
}
}
impl lib::sync::Syncable for #name {
type Operation = #op_enum_name;
fn apply_sync_op(&mut self, op: &Self::Operation) {
self.apply_op(op);
}
fn node_id(&self) -> &lib::sync::NodeId {
// Assume there's a node_id field marked with #[sync(skip)]
&self.node_id
}
}
};
TokenStream::from(expanded)
/// Sync strategy types
///
/// Macro-side mirror of `lib::networking::SyncStrategy`, parsed from the
/// string value of `#[sync(strategy = "...")]`.
#[derive(Debug, Clone, PartialEq)]
enum SyncStrategy {
    LastWriteWins,
    Set,
    Sequence,
    Custom,
}
impl SyncStrategy {
    /// Parse a strategy name exactly as written in `#[sync(strategy = "...")]`.
    ///
    /// The error text lists the accepted names and is surfaced verbatim as a
    /// compile error by the derive.
    fn from_str(s: &str) -> Result<Self, String> {
        match s {
            "LastWriteWins" => Ok(SyncStrategy::LastWriteWins),
            "Set" => Ok(SyncStrategy::Set),
            "Sequence" => Ok(SyncStrategy::Sequence),
            "Custom" => Ok(SyncStrategy::Custom),
            _ => Err(format!(
                "Unknown strategy '{}'. Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"",
                s
            )),
        }
    }
    /// Token path of the corresponding runtime `lib::networking::SyncStrategy`
    /// variant, for splicing into generated `SyncComponent` impls.
    fn to_tokens(&self) -> proc_macro2::TokenStream {
        match self {
            SyncStrategy::LastWriteWins => quote! { lib::networking::SyncStrategy::LastWriteWins },
            SyncStrategy::Set => quote! { lib::networking::SyncStrategy::Set },
            SyncStrategy::Sequence => quote! { lib::networking::SyncStrategy::Sequence },
            SyncStrategy::Custom => quote! { lib::networking::SyncStrategy::Custom },
        }
    }
}
/// Parsed sync attributes
struct SyncAttributes {
    // Required: schema version from `version = N`.
    version: u32,
    // Required: conflict-resolution strategy from `strategy = "..."`.
    strategy: SyncStrategy,
    // Optional flag, defaults to true. Parsed but not consumed in this
    // file — presumably read by later codegen; confirm before removing.
    persist: bool,
    // Optional flag, defaults to false. Same caveat as `persist`.
    lazy: bool,
}
impl SyncAttributes {
    /// Parse the `#[sync(...)]` attribute list from a derive input.
    ///
    /// `version` and `strategy` are mandatory and produce targeted compile
    /// errors (with help/note text) when missing; `persist` and `lazy` are
    /// optional booleans. Unknown keys inside `#[sync(...)]` are rejected.
    fn parse(input: &DeriveInput) -> Result<Self, syn::Error> {
        let mut version: Option<u32> = None;
        let mut strategy: Option<SyncStrategy> = None;
        let mut persist = true; // default
        let mut lazy = false; // default
        // Find the #[sync(...)] attribute
        for attr in &input.attrs {
            if !attr.path().is_ident("sync") {
                continue;
            }
            attr.parse_nested_meta(|meta| {
                if meta.path.is_ident("version") {
                    let value: syn::LitInt = meta.value()?.parse()?;
                    version = Some(value.base10_parse()?);
                    Ok(())
                } else if meta.path.is_ident("strategy") {
                    let value: syn::LitStr = meta.value()?.parse()?;
                    let strategy_str = value.value();
                    // Span the error on the literal so the user sees exactly
                    // which string was rejected.
                    strategy = Some(
                        SyncStrategy::from_str(&strategy_str)
                            .map_err(|e| syn::Error::new_spanned(&value, e))?
                    );
                    Ok(())
                } else if meta.path.is_ident("persist") {
                    let value: syn::LitBool = meta.value()?.parse()?;
                    persist = value.value;
                    Ok(())
                } else if meta.path.is_ident("lazy") {
                    let value: syn::LitBool = meta.value()?.parse()?;
                    lazy = value.value;
                    Ok(())
                } else {
                    Err(meta.error("unrecognized sync attribute"))
                }
            })?;
        }
        // Require version and strategy
        let version = version.ok_or_else(|| {
            syn::Error::new(
                proc_macro2::Span::call_site(),
                "Missing required attribute `version`\n\
                \n\
                = help: Add #[sync(version = 1, strategy = \"...\")] to your struct\n\
                = note: See documentation: https://docs.rs/lonni/sync/strategies.html"
            )
        })?;
        let strategy = strategy.ok_or_else(|| {
            syn::Error::new(
                proc_macro2::Span::call_site(),
                "Missing required attribute `strategy`\n\
                \n\
                = help: Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"\n\
                = help: Add #[sync(version = 1, strategy = \"LastWriteWins\")] to your struct\n\
                = note: See documentation: https://docs.rs/lonni/sync/strategies.html"
            )
        })?;
        Ok(SyncAttributes {
            version,
            strategy,
            persist,
            lazy,
        })
    }
}
/// RFC 0003 macro: Generate SyncComponent trait implementation
///
/// # Example
/// ```ignore
/// use bevy::prelude::*;
/// use lib::networking::Synced;
/// use sync_macros::Synced as SyncedDerive;
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
/// #[reflect(Component)]
/// #[derive(SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Health(f32);
///
/// // In a Bevy system:
/// fn spawn_health(mut commands: Commands) {
/// commands.spawn((Health(100.0), Synced));
/// }
/// ```
#[proc_macro_derive(Synced, attributes(sync))]
pub fn derive_synced(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let name = &input.ident;
let op_enum_name = format_ident!("{}Op", name);
let fields = match &input.data {
| Data::Struct(data) => match &data.fields {
| Fields::Named(fields) => &fields.named,
| _ => panic!("Synced only supports structs with named fields"),
},
| _ => panic!("Synced only supports structs"),
// Parse attributes
let attrs = match SyncAttributes::parse(&input) {
Ok(attrs) => attrs,
Err(e) => return TokenStream::from(e.to_compile_error()),
};
let mut field_ops = Vec::new();
let mut apply_arms = Vec::new();
let mut setter_methods = Vec::new();
let mut merge_code = Vec::new();
let name = &input.ident;
let version = attrs.version;
let strategy_tokens = attrs.strategy.to_tokens();
for field in fields {
let field_name = field.ident.as_ref().unwrap();
let field_type = &field.ty;
// Generate serialization method based on type
let serialize_impl = generate_serialize(&input);
let deserialize_impl = generate_deserialize(&input, name);
// Check if field should be skipped
let should_skip = field.attrs.iter().any(|attr| {
attr.path().is_ident("sync") &&
attr.parse_args::<syn::Ident>()
.map(|i| i == "skip")
.unwrap_or(false)
});
if should_skip {
continue;
}
let op_variant = format_ident!(
"Set{}",
field_name
.to_string()
.chars()
.enumerate()
.map(|(i, c)| if i == 0 { c.to_ascii_uppercase() } else { c })
.collect::<String>()
);
let setter_name = format_ident!("set_{}", field_name);
// Determine CRDT strategy based on type
let crdt_strategy = get_crdt_strategy(field_type);
match crdt_strategy.as_str() {
| "lww" => {
// LWW for simple types
field_ops.push(quote! {
#op_variant {
value: #field_type,
timestamp: chrono::DateTime<chrono::Utc>,
node_id: String,
}
});
apply_arms.push(quote! {
#op_enum_name::#op_variant { value, timestamp, node_id } => {
self.#field_name.apply_lww(value.clone(), timestamp.clone(), node_id.clone());
}
});
setter_methods.push(quote! {
pub fn #setter_name(&mut self, value: #field_type) -> #op_enum_name {
let op = #op_enum_name::#op_variant {
value: value.clone(),
timestamp: chrono::Utc::now(),
node_id: self.node_id().clone(),
};
self.#field_name = lib::sync::SyncedValue::new(value, self.node_id().clone());
op
}
});
merge_code.push(quote! {
self.#field_name.merge(&other.#field_name);
});
},
| _ => {
// Default to LWW
},
}
}
// Generate merge method based on strategy
let merge_impl = generate_merge(&input, &attrs.strategy);
let expanded = quote! {
/// Auto-generated sync operations enum
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(tag = "type")]
pub enum #op_enum_name {
#(#field_ops),*
}
impl lib::networking::SyncComponent for #name {
const VERSION: u32 = #version;
const STRATEGY: lib::networking::SyncStrategy = #strategy_tokens;
impl #op_enum_name {
pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
Ok(serde_json::to_vec(self)?)
#[inline]
fn serialize_sync(&self) -> anyhow::Result<Vec<u8>> {
#serialize_impl
}
pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
Ok(serde_json::from_slice(bytes)?)
}
}
impl #name {
/// Apply a sync operation from another node
pub fn apply_op(&mut self, op: &#op_enum_name) {
match op {
#(#apply_arms),*
}
#[inline]
fn deserialize_sync(data: &[u8]) -> anyhow::Result<Self> {
#deserialize_impl
}
/// Merge state from another instance
pub fn merge(&mut self, other: &Self) {
#(#merge_code)*
}
/// Auto-generated setter methods that create sync ops
#(#setter_methods)*
}
impl lib::sync::Syncable for #name {
type Operation = #op_enum_name;
fn apply_sync_op(&mut self, op: &Self::Operation) {
self.apply_op(op);
#[inline]
fn merge(&mut self, remote: Self, clock_cmp: lib::networking::ClockComparison) -> lib::networking::ComponentMergeDecision {
#merge_impl
}
}
};
@@ -343,9 +181,190 @@ pub fn derive_synced(input: TokenStream) -> TokenStream {
TokenStream::from(expanded)
}
/// Determine CRDT strategy based on field type
fn get_crdt_strategy(_ty: &Type) -> String {
// For now, default everything to LWW
// TODO: Detect HashMap -> use Map, Vec -> use ORSet, etc.
"lww".to_string()
/// Generate specialized serialization code
///
/// Currently every type shares one bincode-based path; per-type fast paths
/// (e.g. `f32::to_le_bytes`) can be introduced later without changing callers.
fn generate_serialize(_input: &DeriveInput) -> proc_macro2::TokenStream {
    let body = quote! {
        bincode::serialize(self).map_err(|e| anyhow::anyhow!("Serialization failed: {}", e))
    };
    body
}
/// Generate specialized deserialization code
///
/// Bincode for every type, mirroring `generate_serialize`.
fn generate_deserialize(_input: &DeriveInput, _name: &syn::Ident) -> proc_macro2::TokenStream {
    let body = quote! {
        bincode::deserialize(data).map_err(|e| anyhow::anyhow!("Deserialization failed: {}", e))
    };
    body
}
/// Generate merge logic based on strategy
fn generate_merge(input: &DeriveInput, strategy: &SyncStrategy) -> proc_macro2::TokenStream {
match strategy {
SyncStrategy::LastWriteWins => generate_lww_merge(input),
SyncStrategy::Set => generate_set_merge(input),
SyncStrategy::Sequence => generate_sequence_merge(input),
SyncStrategy::Custom => generate_custom_merge(input),
}
}
/// Generate Last-Write-Wins merge logic
///
/// Emitted body: take remote when its clock is strictly newer, keep local
/// when local is newer, and on concurrent edits pick deterministically via a
/// hash of the bincode serialization (both replicas compute the same winner).
fn generate_lww_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
    quote! {
        use tracing::info;
        match clock_cmp {
            lib::networking::ClockComparison::RemoteNewer => {
                info!(
                    component = std::any::type_name::<Self>(),
                    ?clock_cmp,
                    "Taking remote (newer)"
                );
                *self = remote;
                lib::networking::ComponentMergeDecision::TookRemote
            }
            lib::networking::ClockComparison::LocalNewer => {
                lib::networking::ComponentMergeDecision::KeptLocal
            }
            lib::networking::ClockComparison::Concurrent => {
                // Tiebreaker: Compare serialized representations for deterministic choice
                // In a real implementation, we'd use node_id, but for now use a simple hash
                let local_hash = {
                    let bytes = bincode::serialize(self).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                let remote_hash = {
                    let bytes = bincode::serialize(&remote).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                if remote_hash > local_hash {
                    info!(
                        component = std::any::type_name::<Self>(),
                        ?clock_cmp,
                        "Taking remote (concurrent, tiebreaker)"
                    );
                    *self = remote;
                    lib::networking::ComponentMergeDecision::TookRemote
                } else {
                    lib::networking::ComponentMergeDecision::KeptLocal
                }
            }
        }
    }
}
/// Generate OR-Set merge logic
///
/// For OR-Set strategy, the component must contain an OrSet<T> field.
/// We merge by calling the OrSet's merge method which implements add-wins semantics.
///
/// NOTE(review): the emitted body does NOT yet perform a real OR-Set merge —
/// it falls back to whole-value LWW with the same hash tiebreaker as
/// `generate_lww_merge` (see inline comments), so `Merged` is never returned.
fn generate_set_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
    quote! {
        use tracing::info;
        // For Set strategy, we always merge the sets
        // The OrSet CRDT handles the conflict resolution with add-wins semantics
        info!(
            component = std::any::type_name::<Self>(),
            "Merging OR-Set (add-wins semantics)"
        );
        // Assuming the component wraps an OrSet or has a field with merge()
        // For now, we'll do a structural merge by replacing the whole value
        // This is a simplified implementation - full implementation would require
        // the component to expose merge() method or implement it directly
        match clock_cmp {
            lib::networking::ClockComparison::RemoteNewer => {
                *self = remote;
                lib::networking::ComponentMergeDecision::TookRemote
            }
            lib::networking::ClockComparison::LocalNewer => {
                lib::networking::ComponentMergeDecision::KeptLocal
            }
            lib::networking::ClockComparison::Concurrent => {
                // In a full implementation, we would merge the OrSet here
                // For now, use LWW with tiebreaker as fallback
                let local_hash = {
                    let bytes = bincode::serialize(self).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                let remote_hash = {
                    let bytes = bincode::serialize(&remote).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                if remote_hash > local_hash {
                    *self = remote;
                    lib::networking::ComponentMergeDecision::TookRemote
                } else {
                    lib::networking::ComponentMergeDecision::KeptLocal
                }
            }
        }
    }
}
/// Generate RGA/Sequence merge logic
///
/// For Sequence strategy, the component must contain an Rga<T> field.
/// We merge by calling the Rga's merge method which maintains causal ordering.
///
/// NOTE(review): like `generate_set_merge`, the emitted body currently falls
/// back to whole-value LWW with a hash tiebreaker instead of a real RGA
/// merge, so `Merged` is never returned.
fn generate_sequence_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
    quote! {
        use tracing::info;
        // For Sequence strategy, we always merge the sequences
        // The RGA CRDT handles the conflict resolution with causal ordering
        info!(
            component = std::any::type_name::<Self>(),
            "Merging RGA sequence (causal ordering)"
        );
        // Assuming the component wraps an Rga or has a field with merge()
        // For now, we'll do a structural merge by replacing the whole value
        // This is a simplified implementation - full implementation would require
        // the component to expose merge() method or implement it directly
        match clock_cmp {
            lib::networking::ClockComparison::RemoteNewer => {
                *self = remote;
                lib::networking::ComponentMergeDecision::TookRemote
            }
            lib::networking::ClockComparison::LocalNewer => {
                lib::networking::ComponentMergeDecision::KeptLocal
            }
            lib::networking::ClockComparison::Concurrent => {
                // In a full implementation, we would merge the Rga here
                // For now, use LWW with tiebreaker as fallback
                let local_hash = {
                    let bytes = bincode::serialize(self).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                let remote_hash = {
                    let bytes = bincode::serialize(&remote).unwrap_or_default();
                    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
                };
                if remote_hash > local_hash {
                    *self = remote;
                    lib::networking::ComponentMergeDecision::TookRemote
                } else {
                    lib::networking::ComponentMergeDecision::KeptLocal
                }
            }
        }
    }
}
/// Generate custom merge logic placeholder
fn generate_custom_merge(input: &DeriveInput) -> proc_macro2::TokenStream {
let name = &input.ident;
quote! {
compile_error!(
concat!(
"Custom strategy requires implementing ConflictResolver trait for ",
stringify!(#name)
)
);
lib::networking::ComponentMergeDecision::KeptLocal
}
}

// ---------------------------------------------------------------------------
// tests/synced_derive.rs (new file in this change, 97 lines)
// ---------------------------------------------------------------------------
//! Basic tests for the Synced derive macro
use bevy::prelude::*;
use lib::networking::{
ClockComparison, ComponentMergeDecision, SyncComponent, SyncStrategy, Synced,
};
use sync_macros::Synced as SyncedDerive;
// Test 1: a basic newtype component with the LWW strategy must compile.
#[derive(
    Component, Reflect, Clone, serde::Serialize, serde::Deserialize, Debug, PartialEq, SyncedDerive,
)]
#[reflect(Component)]
#[sync(version = 1, strategy = "LastWriteWins")]
struct Health(f32);
#[test]
fn test_health_compiles() {
    // Constructing the type is enough to prove the derive expansion compiled.
    let hp = Health(100.0);
    assert_eq!(hp.0, 100.0);
}
#[test]
fn test_health_serialization() {
    // A round-trip through the macro-generated sync codec must be lossless.
    let original = Health(100.0);
    let encoded = original.serialize_sync().unwrap();
    let decoded = Health::deserialize_sync(&encoded).unwrap();
    assert_eq!(original, decoded);
}
#[test]
fn test_health_lww_merge_remote_newer() {
    // LWW: a causally newer remote value must replace the local one.
    let mut ours = Health(50.0);
    let theirs = Health(100.0);
    let outcome = ours.merge(theirs, ClockComparison::RemoteNewer);
    assert_eq!(outcome, ComponentMergeDecision::TookRemote);
    assert_eq!(ours.0, 100.0);
}
#[test]
fn test_health_lww_merge_local_newer() {
    // LWW: when the local clock is newer, the remote value is discarded.
    let mut ours = Health(50.0);
    let theirs = Health(100.0);
    let outcome = ours.merge(theirs, ClockComparison::LocalNewer);
    assert_eq!(outcome, ComponentMergeDecision::KeptLocal);
    assert_eq!(ours.0, 50.0); // local value survives the merge
}
#[test]
fn test_health_lww_merge_concurrent() {
    // Concurrent edits fall back to a deterministic hash tiebreaker, so the
    // only guarantee here is that one of the two valid decisions is made.
    let mut ours = Health(50.0);
    let theirs = Health(100.0);
    let outcome = ours.merge(theirs, ClockComparison::Concurrent);
    assert!(matches!(
        outcome,
        ComponentMergeDecision::TookRemote | ComponentMergeDecision::KeptLocal
    ));
}
// Test 2: a multi-field component must sync all fields together.
#[derive(
    Component, Reflect, Clone, serde::Serialize, serde::Deserialize, Debug, PartialEq, SyncedDerive,
)]
#[reflect(Component)]
#[sync(version = 1, strategy = "LastWriteWins")]
struct Position {
    // Horizontal coordinate.
    x: f32,
    // Vertical coordinate.
    y: f32,
}
#[test]
fn test_position_compiles() {
    // Construction + field access prove the multi-field expansion compiled.
    let here = Position { x: 10.0, y: 20.0 };
    assert_eq!((here.x, here.y), (10.0, 20.0));
}
#[test]
fn test_position_serialization() {
    // A round-trip through the macro-generated sync codec must be lossless.
    let original = Position { x: 10.0, y: 20.0 };
    let encoded = original.serialize_sync().unwrap();
    let decoded = Position::deserialize_sync(&encoded).unwrap();
    assert_eq!(original, decoded);
}
#[test]
fn test_position_merge() {
    // RemoteNewer must overwrite every field of the local component.
    let mut ours = Position { x: 10.0, y: 20.0 };
    let theirs = Position { x: 30.0, y: 40.0 };
    let outcome = ours.merge(theirs, ClockComparison::RemoteNewer);
    assert_eq!(outcome, ComponentMergeDecision::TookRemote);
    assert_eq!((ours.x, ours.y), (30.0, 40.0));
}