chore: initial commit

This commit is contained in:
2025-11-15 23:42:12 +00:00
commit 3c456abadc
47 changed files with 14645 additions and 0 deletions

77
.gitignore vendored Normal file
View File

@@ -0,0 +1,77 @@
# SQLite databases
*.db
*.db-shm
*.db-wal
*.sqlite
*.sqlite3
# Compressed files
*.zip
*.tar.gz
*.7z
*.rar
# Rust/Cargo
target/
# Remove the next line if this is a binary crate; keep it for libraries.
# (.gitignore has no inline comments: "Cargo.lock # ..." is read as a literal
# pattern, so Cargo.lock would never actually be ignored.)
Cargo.lock
**/*.rs.bk
*.pdb
# macOS
.DS_Store
.AppleDouble
.LSOverride
._*
# Windows
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
Desktop.ini
# Linux
*~
.directory
# IDEs and editors
.idea/
.vscode/
*.swp
*.swo
*~
.project
.classpath
.settings/
*.sublime-project
*.sublime-workspace
# Environment and config files that may contain secrets
.env
.env.local
.env.*.local
# Remove the next line if you want to track config.
# (Inline "#" comments are not supported in .gitignore patterns.)
config.toml
*.local.toml
# Logs
*.log
logs/
# Temporary files
tmp/
temp/
*.tmp
# Generated documentation
docs/book/
target/doc/
# OS-specific network storage
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
.fseventsd
.Spotlight-V100
.TemporaryItems
# Project-specific (based on your untracked files)
emotion-gradient-config-*.json

8506
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

51
Cargo.toml Normal file
View File

@@ -0,0 +1,51 @@
[workspace]
members = ["crates/lib", "crates/server", "crates/client", "crates/sync-macros"]
resolver = "2"
[workspace.package]
edition = "2024"
[workspace.dependencies]
# Async runtime
tokio = { version = "1", features = ["full"] }
tokio-stream = "0.1"
# Iroh - P2P networking and gossip
iroh = { version = "0.95.0", features = ["discovery-local-network"] }
iroh-gossip = "0.95.0"
# Database
rusqlite = "0.37.0"
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
toml = "0.9"
# Error handling
thiserror = "2.0"
anyhow = "1.0"
# Date/time
chrono = { version = "0.4", features = ["serde"] }
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Random
rand = "0.8"
# ML/AI
candle-core = "0.8"
candle-nn = "0.8"
candle-transformers = "0.8"
tokenizers = "0.20"
hf-hub = "0.3"
# Bevy
bevy = "0.17"
# Synchronization
parking_lot = "0.12"
crdts = "7.3"

18
config.toml Normal file
View File

@@ -0,0 +1,18 @@
[database]
path = "./us.db"
chat_db_path = "./crates/lib/chat.db"
[services]
poll_interval_ms = 1000
training_set_sample_rate = 0.05
[models]
embedding_model = "Qwen/Qwen3-Embedding-0.6B"
emotion_model = "SamLowe/roberta-base-go_emotions"
[tailscale]
hostname = "lonni-daemon"
state_dir = "./tailscale-state"
[grpc]
port = 50051

1
crates/client/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/target

43
crates/client/Cargo.toml Normal file
View File

@@ -0,0 +1,43 @@
[package]
name = "client"
version = "0.1.0"
edition.workspace = true
[[bin]]
name = "client"
path = "src/main.rs"
[dependencies]
# Bevy
bevy = { version = "0.17", default-features = false, features = [
"bevy_winit",
"bevy_render",
"bevy_core_pipeline",
"bevy_sprite",
"bevy_ui",
"bevy_text",
"png",
"x11",
] }
# Iroh - P2P networking and gossip
iroh = { workspace = true }
iroh-gossip = { workspace = true }
# Async runtime
tokio = { version = "1", features = ["full"] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Error handling
thiserror = "2.0"
anyhow = "1.0"
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Local dependencies
lib = { path = "../lib" }

14
crates/client/src/lib.rs Normal file
View File

@@ -0,0 +1,14 @@
/// Returns the sum of two unsigned 64-bit integers.
///
/// As with any integer `+`, overflow panics in debug builds and wraps in
/// release builds.
pub fn add(left: u64, right: u64) -> u64 {
    left + right
}

#[cfg(test)]
mod tests {
    use super::*;

    // Sanity check: basic addition behaves as expected.
    #[test]
    fn it_works() {
        assert_eq!(add(2, 2), 4);
    }
}

24
crates/client/src/main.rs Normal file
View File

@@ -0,0 +1,24 @@
use bevy::prelude::*;
use tracing::info;
/// Entry point for the desktop client.
///
/// Installs a `tracing` subscriber (filter taken from the `RUST_LOG`
/// environment variable) and then hands control to the Bevy app loop.
fn main() {
    tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .init();
    // Start Bevy app
    App::new()
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, setup)
        .add_systems(Update, sync_system)
        .run();
}

/// Startup system: spawns the 2D camera and logs that the client is up.
fn setup(mut commands: Commands) {
    commands.spawn(Camera2d);
    info!("Client started");
}

/// Per-frame system intended to drive gossip sync; currently a stub.
fn sync_system() {
    // TODO: Implement gossip sync for client
}

4
crates/lib/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
/target
chat.db
*.db-shm
*.db-wal

20
crates/lib/Cargo.toml Normal file
View File

@@ -0,0 +1,20 @@
[package]
name = "lib"
version = "0.1.0"
edition.workspace = true
[dependencies]
rusqlite = { version = "0.37.0", features = ["bundled"] }
chrono = { version = "0.4", features = ["serde"] }
thiserror = "2.0"
serde = { version = "1.0", features = ["derive"] }
serde_json.workspace = true
crdts.workspace = true
anyhow.workspace = true
sync-macros = { path = "../sync-macros" }
[dev-dependencies]
tokio.workspace = true
iroh.workspace = true
iroh-gossip.workspace = true
futures-lite = "2.0"

139
crates/lib/src/db.rs Normal file
View File

@@ -0,0 +1,139 @@
use crate::error::Result;
use crate::models::*;
use rusqlite::{Connection, OpenFlags, Row, params};
/// Read-only handle to an iMessage `chat.db` SQLite database.
pub struct ChatDb {
    // Underlying SQLite connection; opened read-only in `ChatDb::open`.
    conn: Connection,
}
impl ChatDb {
/// Open a connection to the chat database in read-only mode
pub fn open(path: &str) -> Result<Self> {
let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_ONLY)?;
Ok(Self { conn })
}
/// Get messages from the conversation with +31 6 39 13 29 13
///
/// Returns messages from January 1, 2024 to present from the conversation
/// with the specified Dutch phone number.
///
/// # Arguments
///
/// * `start_date` - Start date (defaults to January 1, 2024 if None)
/// * `end_date` - End date (defaults to current time if None)
pub fn get_our_messages(
&self,
start_date: Option<chrono::DateTime<chrono::Utc>>,
end_date: Option<chrono::DateTime<chrono::Utc>>,
) -> Result<Vec<Message>> {
use chrono::{TimeZone, Utc};
// Default date range: January 1, 2024 to now
let start =
start_date.unwrap_or_else(|| Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap());
let end = end_date.unwrap_or_else(|| Utc::now());
// Convert to Apple timestamps (nanoseconds since 2001-01-01)
let start_timestamp = datetime_to_apple_timestamp(start);
let end_timestamp = datetime_to_apple_timestamp(end);
// The phone number might be stored with or without spaces
let phone_with_spaces = "+31 6 39 13 29 13";
let phone_without_spaces = "+31639132913";
// Find the chat with this phone number (try both formats)
let chat = self
.get_chat_for_phone_number(phone_with_spaces)
.or_else(|_| self.get_chat_for_phone_number(phone_without_spaces))?;
// Get messages from this chat within the date range
let mut stmt = self.conn.prepare(
"SELECT m.ROWID, m.guid, m.text, m.service, m.handle_id, m.date, m.date_read, m.date_delivered,
m.is_from_me, m.is_read, m.is_delivered, m.is_sent, m.is_emote, m.is_audio_message,
m.cache_has_attachments, m.associated_message_guid, m.associated_message_type,
m.thread_originator_guid, m.reply_to_guid, m.is_spam
FROM message m
INNER JOIN chat_message_join cmj ON m.ROWID = cmj.message_id
WHERE cmj.chat_id = ?
AND m.date >= ?
AND m.date <= ?
ORDER BY m.date ASC"
)?;
let messages = stmt
.query_map(
params![chat.rowid, start_timestamp, end_timestamp],
map_message_row,
)?
.collect::<std::result::Result<Vec<_>, _>>()?;
Ok(messages)
}
/// Helper function to find the largest chat with a specific phone number
fn get_chat_for_phone_number(&self, phone_number: &str) -> Result<Chat> {
let mut stmt = self.conn.prepare(
"SELECT c.ROWID, c.guid, c.chat_identifier, c.service_name, c.display_name,
c.group_id, c.room_name, c.is_archived, c.is_filtered,
c.last_read_message_timestamp, COUNT(cmj.message_id) as msg_count
FROM chat c
INNER JOIN chat_handle_join chj ON c.ROWID = chj.chat_id
INNER JOIN handle h ON chj.handle_id = h.ROWID
INNER JOIN chat_message_join cmj ON c.ROWID = cmj.chat_id
WHERE h.id = ?
GROUP BY c.ROWID
ORDER BY msg_count DESC
LIMIT 1"
)?;
let chat = stmt.query_row(params![phone_number], |row| {
Ok(Chat {
rowid: row.get(0)?,
guid: row.get(1)?,
chat_identifier: row.get(2)?,
service_name: row.get(3)?,
display_name: row.get(4)?,
group_id: row.get(5)?,
room_name: row.get(6)?,
is_archived: row.get::<_, i64>(7)? != 0,
is_filtered: row.get::<_, i64>(8)? != 0,
last_read_message_timestamp: row.get::<_, Option<i64>>(9)?.map(apple_timestamp_to_datetime),
})
})?;
Ok(chat)
}
}
// Helper function to map database rows to structs.
// Column order must match the SELECT list in `ChatDb::get_our_messages`;
// integer columns 8..=14 and 19 are SQLite 0/1 booleans.
fn map_message_row(row: &Row) -> rusqlite::Result<Message> {
    Ok(Message {
        rowid: row.get(0)?,
        guid: row.get(1)?,
        text: row.get(2)?,
        service: row.get(3)?,
        handle_id: row.get(4)?,
        // Date columns are nullable Apple/Cocoa timestamps (ns since 2001-01-01).
        date: row
            .get::<_, Option<i64>>(5)?
            .map(apple_timestamp_to_datetime),
        date_read: row
            .get::<_, Option<i64>>(6)?
            .map(apple_timestamp_to_datetime),
        date_delivered: row
            .get::<_, Option<i64>>(7)?
            .map(apple_timestamp_to_datetime),
        is_from_me: row.get::<_, i64>(8)? != 0,
        is_read: row.get::<_, i64>(9)? != 0,
        is_delivered: row.get::<_, i64>(10)? != 0,
        is_sent: row.get::<_, i64>(11)? != 0,
        is_emote: row.get::<_, i64>(12)? != 0,
        is_audio_message: row.get::<_, i64>(13)? != 0,
        cache_has_attachments: row.get::<_, i64>(14)? != 0,
        associated_message_guid: row.get(15)?,
        associated_message_type: row.get(16)?,
        thread_originator_guid: row.get(17)?,
        reply_to_guid: row.get(18)?,
        is_spam: row.get::<_, i64>(19)? != 0,
    })
}

15
crates/lib/src/error.rs Normal file
View File

@@ -0,0 +1,15 @@
use thiserror::Error;
/// Errors produced by the chat-database access layer.
#[derive(Error, Debug)]
pub enum ChatDbError {
    /// Any error bubbled up from the underlying SQLite driver.
    #[error("Database error: {0}")]
    Database(#[from] rusqlite::Error),
    /// A requested entity (chat, message, ...) was not found.
    #[error("Not found: {0}")]
    NotFound(String),
    /// Data read from the database did not have the expected shape.
    #[error("Invalid data: {0}")]
    InvalidData(String),
}

/// Convenience alias used throughout this crate.
pub type Result<T> = std::result::Result<T, ChatDbError>;

30
crates/lib/src/lib.rs Normal file
View File

@@ -0,0 +1,30 @@
//! Data access layer for iMessage chat.db
//!
//! This library provides a read-only interface to query messages from a specific conversation.
//!
//! # Safety
//!
//! All database connections are opened in read-only mode to prevent any
//! accidental modifications to your iMessage database.
//!
//! # Example
//!
//! ```no_run
//! use lib::ChatDb;
//!
//! let db = ChatDb::open("chat.db")?;
//!
//! // Get all messages from January 2024 to now
//! let messages = db.get_our_messages(None, None)?;
//! println!("Found {} messages", messages.len());
//! # Ok::<(), lib::ChatDbError>(())
//! ```
// Internal modules.
mod error;
mod models;
mod db;
// Public so downstream crates can use the CRDT sync helpers directly.
pub mod sync;

// Re-export the crate's primary API surface.
pub use error::{ChatDbError, Result};
pub use models::{Message, Chat};
pub use db::ChatDb;

112
crates/lib/src/models.rs Normal file
View File

@@ -0,0 +1,112 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
/// Represents a message in the iMessage database
///
/// Field names mirror columns of chat.db's `message` table; the boolean
/// fields are decoded from SQLite 0/1 integers by the row mappers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// SQLite ROWID in the `message` table.
    pub rowid: i64,
    /// Globally unique message identifier.
    pub guid: String,
    /// Message body; may be NULL in the database (e.g. attachment-only
    /// messages — presumably; verify against real data).
    pub text: Option<String>,
    /// Service the message was sent over — assumed "iMessage"/"SMS"; confirm.
    pub service: Option<String>,
    /// Foreign key to the `handle` table (conversation participant).
    pub handle_id: i64,
    /// Send/receive time, converted from an Apple nanosecond timestamp.
    pub date: Option<DateTime<Utc>>,
    pub date_read: Option<DateTime<Utc>>,
    pub date_delivered: Option<DateTime<Utc>>,
    pub is_from_me: bool,
    pub is_read: bool,
    pub is_delivered: bool,
    pub is_sent: bool,
    pub is_emote: bool,
    pub is_audio_message: bool,
    pub cache_has_attachments: bool,
    /// Present when this message is associated with another (e.g. a tapback
    /// — NOTE(review): semantics inferred from the column name; confirm).
    pub associated_message_guid: Option<String>,
    pub associated_message_type: i64,
    pub thread_originator_guid: Option<String>,
    pub reply_to_guid: Option<String>,
    pub is_spam: bool,
}
/// Represents a chat/conversation
///
/// Mirrors chat.db's `chat` table; boolean fields are decoded from SQLite
/// 0/1 integers by `ChatDb::get_chat_for_phone_number`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Chat {
    /// SQLite ROWID in the `chat` table.
    pub rowid: i64,
    pub guid: String,
    pub chat_identifier: Option<String>,
    pub service_name: Option<String>,
    pub display_name: Option<String>,
    pub group_id: Option<String>,
    pub room_name: Option<String>,
    pub is_archived: bool,
    pub is_filtered: bool,
    /// Converted from an Apple nanosecond timestamp when non-NULL.
    pub last_read_message_timestamp: Option<DateTime<Utc>>,
}
/// Helper function to convert Apple's Cocoa timestamp (nanoseconds since
/// 2001-01-01) to DateTime.
///
/// NOTE(review): the doc previously said "seconds", but the arithmetic below
/// (and `datetime_to_apple_timestamp`) treat the value as nanoseconds.
pub fn apple_timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> {
    // Apple's Cocoa timestamps are in nanoseconds since 2001-01-01 00:00:00 UTC
    // Convert to Unix timestamp (seconds since 1970-01-01 00:00:00 UTC)
    const APPLE_EPOCH_OFFSET: i64 = 978307200; // Seconds between 1970-01-01 and 2001-01-01
    let seconds = timestamp / 1_000_000_000 + APPLE_EPOCH_OFFSET;
    // For negative inputs that are not whole seconds, `%` yields a negative
    // remainder and the `as u32` cast wraps; `from_timestamp` then returns
    // None and we fall back to the Unix epoch rather than panicking.
    let nanos = (timestamp % 1_000_000_000) as u32;
    DateTime::from_timestamp(seconds, nanos)
        .unwrap_or_else(|| DateTime::from_timestamp(0, 0).unwrap())
}
/// Converts a `DateTime<Utc>` to Apple's Cocoa timestamp
/// (nanoseconds since 2001-01-01 00:00:00 UTC).
pub fn datetime_to_apple_timestamp(dt: DateTime<Utc>) -> i64 {
    // Seconds between the Unix epoch (1970) and the Cocoa epoch (2001).
    const APPLE_EPOCH_OFFSET: i64 = 978307200;
    let secs_since_cocoa_epoch = dt.timestamp() - APPLE_EPOCH_OFFSET;
    secs_since_cocoa_epoch * 1_000_000_000 + i64::from(dt.timestamp_subsec_nanos())
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::{Datelike, TimeZone, Timelike};

    // Timestamp 0 is the Cocoa epoch itself: 2001-01-01T00:00:00Z.
    #[test]
    fn test_apple_timestamp_to_datetime_zero() {
        let dt = apple_timestamp_to_datetime(0);
        assert_eq!(dt.year(), 2001);
        assert_eq!(dt.month(), 1);
        assert_eq!(dt.day(), 1);
        assert_eq!(dt.hour(), 0);
        assert_eq!(dt.minute(), 0);
        assert_eq!(dt.second(), 0);
    }

    // 694224000000000000 ns = 694_224_000 s past 2001 => 2023-01-01.
    #[test]
    fn test_apple_timestamp_to_datetime_known_value() {
        let timestamp = 694224000000000000i64;
        let dt = apple_timestamp_to_datetime(timestamp);
        assert_eq!(dt.year(), 2023);
        assert_eq!(dt.month(), 1);
        assert_eq!(dt.day(), 1);
    }

    // The two conversion helpers must be exact inverses for whole-second values.
    #[test]
    fn test_apple_timestamp_roundtrip() {
        let original = 694224000000000000i64;
        let dt = apple_timestamp_to_datetime(original);
        let converted_back = datetime_to_apple_timestamp(dt);
        assert_eq!(original, converted_back);
    }

    // The Cocoa epoch converts back to timestamp 0.
    #[test]
    fn test_datetime_to_apple_timestamp_epoch() {
        let dt = Utc.with_ymd_and_hms(2001, 1, 1, 0, 0, 0).unwrap();
        let timestamp = datetime_to_apple_timestamp(dt);
        assert_eq!(timestamp, 0);
    }

    // One year (365 days) before the Cocoa epoch lands in 2000 (leap year).
    #[test]
    fn test_negative_apple_timestamp() {
        let timestamp = -31536000000000000i64;
        let dt = apple_timestamp_to_datetime(timestamp);
        assert_eq!(dt.year(), 2000);
    }
}

165
crates/lib/src/sync.rs Normal file
View File

@@ -0,0 +1,165 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::ops::{Deref, DerefMut};
// Re-export the macros
pub use sync_macros::{synced, Synced};

// Re-export common CRDT types from the crdts library
pub use crdts::{
    ctx::ReadCtx,
    lwwreg::LWWReg,
    map::Map,
    orswot::Orswot,
    CmRDT, CvRDT,
};

/// Identifier of a participating node; also used as the LWW tie-breaker.
pub type NodeId = String;

/// Transparent wrapper for synced values
///
/// This wraps any value with LWW semantics but allows you to use it like a normal value
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncedValue<T: Clone> {
    // Current value as of the most recent accepted write.
    value: T,
    // Wall-clock time of that write — the last-writer-wins ordering key.
    timestamp: DateTime<Utc>,
    // Node that performed the write; breaks equal-timestamp ties.
    node_id: NodeId,
}
impl<T: Clone> SyncedValue<T> {
    /// Wrap `value`, stamping it with the current time and `node_id`.
    pub fn new(value: T, node_id: NodeId) -> Self {
        Self {
            value,
            timestamp: Utc::now(),
            node_id,
        }
    }

    /// Borrow the current value.
    pub fn get(&self) -> &T {
        &self.value
    }

    /// Overwrite locally, refreshing the timestamp to "now".
    pub fn set(&mut self, value: T, node_id: NodeId) {
        self.value = value;
        self.timestamp = Utc::now();
        self.node_id = node_id;
    }

    /// Apply a (possibly remote) write with last-writer-wins semantics.
    /// Equal timestamps are broken in favor of the larger node id.
    pub fn apply_lww(&mut self, value: T, timestamp: DateTime<Utc>, node_id: NodeId) {
        use std::cmp::Ordering;
        let incoming_wins = match timestamp.cmp(&self.timestamp) {
            Ordering::Greater => true,
            Ordering::Equal => node_id > self.node_id,
            Ordering::Less => false,
        };
        if incoming_wins {
            self.value = value;
            self.timestamp = timestamp;
            self.node_id = node_id;
        }
    }

    /// Merge another replica's state into this one via LWW.
    pub fn merge(&mut self, other: &Self) {
        let Self { value, timestamp, node_id } = other;
        self.apply_lww(value.clone(), *timestamp, node_id.clone());
    }
}
// Allow transparent access to the inner value.
// Note: `DerefMut` bypasses `set`, so mutations through it do NOT bump the
// LWW timestamp — callers that need sync semantics must use `set`.
impl<T: Clone> Deref for SyncedValue<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<T: Clone> DerefMut for SyncedValue<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}
/// Wrapper for a sync message that goes over gossip
///
/// Serialized to JSON by `to_bytes` / `from_bytes`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncMessage<T> {
    /// Unique message ID (format: `{node_id}-{millis}-{seq}`)
    pub message_id: String,
    /// Node that sent this
    pub node_id: NodeId,
    /// When it was sent
    pub timestamp: DateTime<Utc>,
    /// The actual sync operation
    pub operation: T,
}
impl<T: Serialize> SyncMessage<T> {
    /// Build a new sync message from `node_id` and `operation`.
    ///
    /// The `message_id` combines the node id, the current millisecond
    /// timestamp, and a process-wide sequence counter so ids from one
    /// process are unique even within the same millisecond.
    pub fn new(node_id: NodeId, operation: T) -> Self {
        use std::sync::atomic::{AtomicU64, Ordering};
        static COUNTER: AtomicU64 = AtomicU64::new(0);
        let seq = COUNTER.fetch_add(1, Ordering::SeqCst);
        // Capture the clock once so the millis embedded in `message_id`
        // always agree with the `timestamp` field (previously two separate
        // `Utc::now()` calls could straddle a millisecond boundary).
        let now = Utc::now();
        Self {
            message_id: format!("{}-{}-{}", node_id, now.timestamp_millis(), seq),
            node_id,
            timestamp: now,
            operation,
        }
    }

    /// Serialize this message to its JSON wire format.
    pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
        Ok(serde_json::to_vec(self)?)
    }
}
impl<T: for<'de> Deserialize<'de>> SyncMessage<T> {
    /// Deserialize a message from the JSON wire format produced by `to_bytes`.
    pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
        Ok(serde_json::from_slice(bytes)?)
    }
}
/// Helper trait for types that can be synced
///
/// Implementors define how a wire operation mutates local state; the trait
/// supplies the envelope construction.
pub trait Syncable: Sized {
    /// Wire-serializable operation type for this syncable value.
    type Operation: Serialize + for<'de> Deserialize<'de> + Clone;
    /// Apply a sync operation to this value
    fn apply_sync_op(&mut self, op: &Self::Operation);
    /// Get the node ID for this instance
    fn node_id(&self) -> &NodeId;
    /// Create a sync message for an operation
    fn create_sync_message(&self, op: Self::Operation) -> SyncMessage<Self::Operation> {
        SyncMessage::new(self.node_id().clone(), op)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Covers local set/get plus the LWW rule: an older timestamp must lose.
    #[test]
    fn test_synced_value() {
        let mut val = SyncedValue::new(42, "node1".to_string());
        assert_eq!(*val.get(), 42);
        val.set(100, "node1".to_string());
        assert_eq!(*val.get(), 100);
        // Test LWW semantics
        let old_time = Utc::now() - chrono::Duration::seconds(10);
        val.apply_lww(50, old_time, "node2".to_string());
        assert_eq!(*val.get(), 100); // Should not update with older timestamp
    }

    // Round-trips a message through the JSON wire format.
    #[test]
    fn test_sync_message() {
        #[derive(Debug, Clone, Serialize, Deserialize)]
        struct TestOp {
            value: i32,
        }
        let op = TestOp { value: 42 };
        let msg = SyncMessage::new("node1".to_string(), op);
        let bytes = msg.to_bytes().unwrap();
        let decoded = SyncMessage::<TestOp>::from_bytes(&bytes).unwrap();
        assert_eq!(decoded.node_id, "node1");
        assert_eq!(decoded.operation.value, 42);
    }
}

View File

@@ -0,0 +1,98 @@
use lib::{ChatDb, Result};
use chrono::Datelike;
/// Test that we can get messages from the Dutch phone number conversation
///
/// Requires a populated `chat.db` next to the test binary's working dir.
#[test]
fn test_get_our_messages_default_range() -> Result<()> {
    let db = ChatDb::open("chat.db")?;
    // Get messages from January 2024 to now (default)
    let messages = db.get_our_messages(None, None)?;
    println!("Found {} messages from January 2024 to now", messages.len());
    // Verify we got some messages
    assert!(!messages.is_empty(), "Should find messages in the conversation");
    // Verify messages are in chronological order (ASC)
    for i in 1..messages.len().min(10) {
        if let (Some(prev_date), Some(curr_date)) = (messages[i - 1].date, messages[i].date) {
            assert!(
                prev_date <= curr_date,
                "Messages should be in ascending date order"
            );
        }
    }
    // Verify all messages are from 2024 or later
    for msg in messages.iter().take(10) {
        if let Some(date) = msg.date {
            assert!(date.year() >= 2024, "Messages should be from 2024 or later");
            // Truncate by characters, not bytes: the old `&s[..s.len().min(50)]`
            // panics when byte 50 falls inside a multi-byte UTF-8 character.
            let preview: Option<String> =
                msg.text.as_ref().map(|s| s.chars().take(50).collect());
            println!("Message date: {}, from_me: {}, text: {:?}",
                date, msg.is_from_me, preview);
        }
    }
    Ok(())
}
/// Test that we can get messages with a custom date range
///
/// Requires a populated `chat.db`; checks that every returned message falls
/// inside the requested window.
#[test]
fn test_get_our_messages_custom_range() -> Result<()> {
    use chrono::{TimeZone, Utc};
    let db = ChatDb::open("chat.db")?;
    // Get messages from March 2024 to June 2024
    let start = Utc.with_ymd_and_hms(2024, 3, 1, 0, 0, 0).unwrap();
    let end = Utc.with_ymd_and_hms(2024, 6, 1, 0, 0, 0).unwrap();
    let messages = db.get_our_messages(Some(start), Some(end))?;
    println!("Found {} messages from March to June 2024", messages.len());
    // Verify all messages are within the date range
    for msg in &messages {
        if let Some(date) = msg.date {
            assert!(
                date >= start && date <= end,
                "Message date {} should be between {} and {}",
                date, start, end
            );
        }
    }
    Ok(())
}
/// Test displaying a summary of the conversation
///
/// Requires a populated `chat.db`; prints counts and a short preview of the
/// first five messages.
#[test]
fn test_conversation_summary() -> Result<()> {
    let db = ChatDb::open("chat.db")?;
    let messages = db.get_our_messages(None, None)?;
    println!("\n=== Conversation Summary ===");
    println!("Total messages: {}", messages.len());
    let from_me = messages.iter().filter(|m| m.is_from_me).count();
    let from_them = messages.len() - from_me;
    println!("From me: {}", from_me);
    println!("From them: {}", from_them);
    // Show first few messages
    println!("\nFirst 5 messages:");
    for (i, msg) in messages.iter().take(5).enumerate() {
        if let Some(date) = msg.date {
            let sender = if msg.is_from_me { "Me" } else { "Them" };
            // `char_indices().nth(60)` yields the byte offset of the 61st
            // character, so the slice below always lands on a char boundary
            // (the old `&t[..60]` could panic on multi-byte text).
            let text = msg.text.as_ref()
                .map(|t| match t.char_indices().nth(60) {
                    Some((cut, _)) => format!("{}...", &t[..cut]),
                    None => t.clone(),
                })
                .unwrap_or_else(|| "[No text]".to_string());
            println!("{}. {} ({}): {}", i + 1, date.format("%Y-%m-%d %H:%M"), sender, text);
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,157 @@
use lib::sync::{synced, SyncMessage, Syncable};
use iroh::{Endpoint, protocol::{Router, ProtocolHandler, AcceptError}};
use anyhow::Result;
use std::sync::Arc;
use tokio::sync::Mutex;
/// Test configuration that can be synced
///
/// NOTE(review): `#[synced]` (from `sync-macros`) appears to generate the op
/// type (`TestConfigOp`), `new`, getters, `set_*`, and `apply_op` used below
/// — confirm against the macro implementation.
#[synced]
struct TestConfig {
    value: i32,
    name: String,
    // Local-only: identifies this node, excluded from replication.
    #[sync(skip)]
    node_id: String,
}

/// ALPN identifier for our sync protocol
const SYNC_ALPN: &[u8] = b"/lonni/sync/1";

/// Protocol handler for receiving sync messages
#[derive(Debug, Clone)]
struct SyncProtocol {
    // Shared with the test body so it can assert on post-sync state.
    config: Arc<Mutex<TestConfig>>,
}
impl ProtocolHandler for SyncProtocol {
    /// Accept one incoming connection, read a single sync message off a
    /// bidirectional stream, and apply it to the shared config.
    async fn accept(&self, connection: iroh::endpoint::Connection) -> Result<(), AcceptError> {
        println!("Accepting connection from: {}", connection.remote_id());
        // Accept the bidirectional stream
        let (mut send, mut recv) = connection.accept_bi().await
            .map_err(AcceptError::from_err)?;
        println!("Stream accepted, reading message...");
        // Read the sync message (size capped at 1 MiB)
        let bytes = recv.read_to_end(1024 * 1024).await
            .map_err(AcceptError::from_err)?;
        println!("Received {} bytes", bytes.len());
        // Deserialize and apply; malformed payloads surface as InvalidData.
        let msg = SyncMessage::<TestConfigOp>::from_bytes(&bytes)
            .map_err(|e| AcceptError::from_err(std::io::Error::new(std::io::ErrorKind::InvalidData, e)))?;
        println!("Applying operation from node: {}", msg.node_id);
        let mut config = self.config.lock().await;
        config.apply_op(&msg.operation);
        println!("Operation applied successfully");
        // Close the stream
        send.finish()
            .map_err(AcceptError::from_err)?;
        Ok(())
    }
}
/// End-to-end test: node1 pushes a `set_value` operation to node2 over a raw
/// QUIC stream (no gossip) and both replicas converge on the new value.
#[tokio::test(flavor = "multi_thread")]
async fn test_sync_between_two_nodes() -> Result<()> {
    println!("\n=== Testing Sync Between Two Nodes ===\n");
    // Create two endpoints
    let node1 = Endpoint::builder().bind().await?;
    let node2 = Endpoint::builder().bind().await?;
    let node1_addr = node1.addr();
    let node2_addr = node2.addr();
    let node1_id = node1_addr.id.to_string();
    let node2_id = node2_addr.id.to_string();
    println!("Node 1: {}", node1_id);
    println!("Node 2: {}", node2_id);
    // Create synced configs on both nodes
    let mut config1 = TestConfig::new(
        42,
        "initial".to_string(),
        node1_id.clone(),
    );
    let config2 = TestConfig::new(
        42,
        "initial".to_string(),
        node2_id.clone(),
    );
    // node2's replica is shared with the protocol handler task.
    let config2_shared = Arc::new(Mutex::new(config2));
    println!("\nInitial state:");
    println!(" Node 1: value={}, name={}", config1.value(), config1.name());
    {
        let config2 = config2_shared.lock().await;
        println!(" Node 2: value={}, name={}", config2.value(), config2.name());
    }
    // Set up router on node2 to accept incoming connections
    println!("\nSetting up node2 router...");
    let protocol = SyncProtocol {
        config: config2_shared.clone(),
    };
    let router = Router::builder(node2)
        .accept(SYNC_ALPN, protocol)
        .spawn();
    router.endpoint().online().await;
    println!("✓ Node2 router ready");
    // Node 1 changes the value
    println!("\nNode 1 changing value to 100...");
    let op = config1.set_value(100);
    // Serialize the operation
    let sync_msg = SyncMessage::new(node1_id.clone(), op);
    let bytes = sync_msg.to_bytes()?;
    println!("Serialized to {} bytes", bytes.len());
    // Establish QUIC connection from node1 to node2
    println!("\nEstablishing QUIC connection...");
    let conn = node1.connect(node2_addr.clone(), SYNC_ALPN).await?;
    println!("✓ Connection established");
    // Open a bidirectional stream
    let (mut send, _recv) = conn.open_bi().await?;
    // Send the sync message
    println!("Sending sync message...");
    send.write_all(&bytes).await?;
    send.finish()?;
    println!("✓ Message sent");
    // Wait a bit for the message to be processed
    // NOTE(review): a fixed 100 ms sleep races with the accept handler; on a
    // slow CI machine this could flake. Consider an explicit ack or polling
    // with a deadline instead.
    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    // Verify both configs have the same value
    println!("\nFinal state:");
    println!(" Node 1: value={}, name={}", config1.value(), config1.name());
    {
        let config2 = config2_shared.lock().await;
        println!(" Node 2: value={}, name={}", config2.value(), config2.name());
        assert_eq!(*config1.value(), 100);
        assert_eq!(*config2.value(), 100);
        assert_eq!(config1.name(), "initial");
        assert_eq!(config2.name(), "initial");
    }
    println!("\n✓ Sync successful!");
    // Cleanup
    router.shutdown().await?;
    node1.close().await;
    Ok(())
}

58
crates/server/Cargo.toml Normal file
View File

@@ -0,0 +1,58 @@
[package]
name = "server"
version = "0.1.0"
edition.workspace = true
[[bin]]
name = "server"
path = "src/main.rs"
[dependencies]
# Bevy (headless)
bevy = { version = "0.17", default-features = false, features = [
"bevy_state",
] }
# Iroh - P2P networking and gossip
iroh = { workspace = true }
iroh-gossip = { workspace = true }
# Async runtime
tokio = { version = "1", features = ["full"] }
tokio-stream = "0.1"
futures-lite = "2.5"
# Database
rusqlite = { version = "0.37.0", features = ["bundled", "column_decltype", "load_extension"] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
toml = "0.9"
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Error handling
thiserror = "2.0"
anyhow = "1.0"
# Date/time
chrono = { version = "0.4", features = ["serde"] }
# Random number generation
rand = "0.8"
# ML/AI - Candle for inference (using newer versions with better compatibility)
candle-core = "0.8"
candle-nn = "0.8"
candle-transformers = "0.8"
tokenizers = "0.20"
hf-hub = "0.3"
# Synchronization
parking_lot = { workspace = true }
# Local dependencies
lib = { path = "../lib" }

View File

@@ -0,0 +1 @@
// Asset loading and management will go here

View File

@@ -0,0 +1,14 @@
use bevy::prelude::*;
use parking_lot::Mutex;
use rusqlite::Connection;
use std::sync::Arc;
use crate::config::Config;
/// Bevy resource wrapping application configuration
#[derive(Resource)]
pub struct AppConfig(pub Config);

/// Bevy resource wrapping database connection
///
/// Uses `parking_lot::Mutex` (non-poisoning) behind an `Arc` so the
/// connection can be shared outside the ECS.
#[derive(Resource)]
pub struct Database(pub Arc<Mutex<Connection>>);

View File

@@ -0,0 +1,87 @@
use bevy::prelude::*;
use iroh::protocol::Router;
use iroh::Endpoint;
use iroh_gossip::api::{GossipReceiver, GossipSender};
use iroh_gossip::net::Gossip;
use iroh_gossip::proto::TopicId;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Message envelope for gossip sync
///
/// Serialized as JSON by `serialize_sync_message` / `deserialize_sync_message`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncMessage {
    /// The actual message from iMessage
    pub message: lib::Message,
    /// Timestamp when this was published to gossip
    pub sync_timestamp: i64,
    /// ID of the node that published this
    pub publisher_node_id: String,
}
/// Bevy resource wrapping the gossip handle
#[derive(Resource, Clone)]
pub struct IrohGossipHandle {
    pub gossip: Gossip,
}

/// Bevy resource wrapping the gossip sender
///
/// `Arc<parking_lot::Mutex<..>>` so systems and async tasks can share one sender.
#[derive(Resource)]
pub struct IrohGossipSender {
    pub sender: Arc<Mutex<GossipSender>>,
}

/// Bevy resource wrapping the gossip receiver
#[derive(Resource)]
pub struct IrohGossipReceiver {
    pub receiver: Arc<Mutex<GossipReceiver>>,
}

/// Bevy resource with Iroh router
#[derive(Resource)]
pub struct IrohRouter {
    pub router: Router,
}

/// Bevy resource with Iroh endpoint
#[derive(Resource, Clone)]
pub struct IrohEndpoint {
    pub endpoint: Endpoint,
    /// Node id rendered as a string.
    pub node_id: String,
}

/// Bevy resource for gossip topic ID
#[derive(Resource)]
pub struct GossipTopic(pub TopicId);

/// Bevy resource for tracking gossip initialization task
///
/// Holds the async task that sets up the endpoint/gossip stack; `None` in the
/// payload presumably signals initialization failure — confirm against the
/// system that polls this task.
#[derive(Resource)]
pub struct GossipInitTask(pub bevy::tasks::Task<Option<(
    Endpoint,
    Gossip,
    Router,
    GossipSender,
    GossipReceiver,
)>>);
/// Bevy message: a new message that needs to be published to gossip
#[derive(Message, Clone, Debug)]
pub struct PublishMessageEvent {
    /// Message pulled from chat.db, to be wrapped in a `SyncMessage`.
    pub message: lib::Message,
}

/// Bevy message: a message received from gossip that needs to be saved to SQLite
#[derive(Message, Clone, Debug)]
pub struct GossipMessageReceived {
    /// The decoded envelope as it arrived off the wire.
    pub sync_message: SyncMessage,
}
/// Helper to serialize a sync message
pub fn serialize_sync_message(msg: &SyncMessage) -> anyhow::Result<Vec<u8>> {
Ok(serde_json::to_vec(msg)?)
}
/// Helper to deserialize a sync message
pub fn deserialize_sync_message(data: &[u8]) -> anyhow::Result<SyncMessage> {
Ok(serde_json::from_slice(data)?)
}

View File

@@ -0,0 +1,5 @@
pub mod database;
pub mod gossip;
pub use database::*;
pub use gossip::*;

View File

@@ -0,0 +1,84 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
/// Top-level application configuration, loaded from `config.toml`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub database: DatabaseConfig,
    pub services: ServicesConfig,
    pub models: ModelsConfig,
    pub tailscale: TailscaleConfig,
    pub grpc: GrpcConfig,
}

/// `[database]` section: paths to the app DB and the source chat.db.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    pub path: String,
    pub chat_db_path: String,
}

/// `[services]` section: polling cadence and training sampling rate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServicesConfig {
    pub poll_interval_ms: u64,
    pub training_set_sample_rate: f64,
}

/// `[models]` section: Hugging Face model identifiers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelsConfig {
    pub embedding_model: String,
    pub emotion_model: String,
}

/// `[tailscale]` section.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TailscaleConfig {
    pub hostname: String,
    pub state_dir: String,
}

/// `[grpc]` section.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GrpcConfig {
    pub port: u16,
}
impl Config {
    /// Load configuration from a TOML file at `path`.
    ///
    /// # Errors
    ///
    /// Fails if the file cannot be read or does not parse as a `Config`.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path = path.as_ref();
        // `with_context` defers building the message until an error occurs,
        // avoiding an allocation on the success path.
        let content = fs::read_to_string(path)
            .with_context(|| format!("Failed to read config file: {:?}", path))?;
        let config: Config = toml::from_str(&content)
            .context("Failed to parse config file")?;
        Ok(config)
    }

    /// Built-in defaults mirroring the repository's `config.toml`.
    pub fn default_config() -> Self {
        Self {
            database: DatabaseConfig {
                path: "./us.db".to_string(),
                chat_db_path: "./crates/lib/chat.db".to_string(),
            },
            services: ServicesConfig {
                poll_interval_ms: 1000,
                training_set_sample_rate: 0.05,
            },
            models: ModelsConfig {
                embedding_model: "Qwen/Qwen3-Embedding-0.6B".to_string(),
                emotion_model: "SamLowe/roberta-base-go_emotions".to_string(),
            },
            tailscale: TailscaleConfig {
                hostname: "lonni-daemon".to_string(),
                state_dir: "./tailscale-state".to_string(),
            },
            grpc: GrpcConfig {
                port: 50051,
            },
        }
    }

    /// Serialize this configuration to pretty TOML and write it to `path`.
    ///
    /// # Errors
    ///
    /// Fails if serialization or the filesystem write fails.
    pub fn save<P: AsRef<Path>>(&self, path: P) -> Result<()> {
        let path = path.as_ref();
        let content = toml::to_string_pretty(self)
            .context("Failed to serialize config")?;
        fs::write(path, content)
            .with_context(|| format!("Failed to write config file: {:?}", path))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,5 @@
pub mod operations;
pub mod schema;
pub use operations::*;
pub use schema::*;

View File

@@ -0,0 +1,321 @@
use crate::db::schema::{deserialize_embedding, serialize_embedding};
use crate::models::*;
use chrono::{TimeZone, Utc};
use rusqlite::{params, Connection, OptionalExtension, Result, Row};
/// Insert a new message into the database
pub fn insert_message(conn: &Connection, msg: &lib::Message) -> Result<i64> {
let timestamp = msg.date.map(|dt| dt.timestamp());
let created_at = Utc::now().timestamp();
conn.execute(
"INSERT INTO messages (chat_db_rowid, text, timestamp, is_from_me, created_at)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT(chat_db_rowid) DO NOTHING",
params![msg.rowid, msg.text, timestamp, msg.is_from_me, created_at],
)?;
Ok(conn.last_insert_rowid())
}
/// Get message ID by chat.db rowid
///
/// Returns `Ok(None)` (rather than an error) when no row matches.
pub fn get_message_id_by_chat_rowid(conn: &Connection, chat_db_rowid: i64) -> Result<Option<i64>> {
    conn.query_row(
        "SELECT id FROM messages WHERE chat_db_rowid = ?1",
        params![chat_db_rowid],
        |row| row.get(0),
    )
    .optional()
}
/// Get message by ID
///
/// Errors with `QueryReturnedNoRows` if the id does not exist.
pub fn get_message(conn: &Connection, id: i64) -> Result<Message> {
    conn.query_row(
        "SELECT id, chat_db_rowid, text, timestamp, is_from_me, created_at FROM messages WHERE id = ?1",
        params![id],
        map_message_row,
    )
}

/// Map a `messages` row to the `Message` model.
/// Column order must match the SELECT lists above.
fn map_message_row(row: &Row) -> Result<Message> {
    let timestamp: Option<i64> = row.get(3)?;
    let created_at: i64 = row.get(5)?;
    Ok(Message {
        id: row.get(0)?,
        chat_db_rowid: row.get(1)?,
        text: row.get(2)?,
        // NOTE(review): `timestamp_opt(..).unwrap()` panics on out-of-range
        // seconds. Fine for timestamps this crate wrote itself, but worth
        // confirming for rows that arrive via sync.
        timestamp: timestamp.map(|ts| Utc.timestamp_opt(ts, 0).unwrap()),
        is_from_me: row.get(4)?,
        created_at: Utc.timestamp_opt(created_at, 0).unwrap(),
    })
}
/// Insert message embedding
pub fn insert_message_embedding(
conn: &Connection,
message_id: i64,
embedding: &[f32],
model_name: &str,
) -> Result<i64> {
let embedding_bytes = serialize_embedding(embedding);
let created_at = Utc::now().timestamp();
conn.execute(
"INSERT INTO message_embeddings (message_id, embedding, model_name, created_at)
VALUES (?1, ?2, ?3, ?4)",
params![message_id, embedding_bytes, model_name, created_at],
)?;
Ok(conn.last_insert_rowid())
}
/// Get message embedding
pub fn get_message_embedding(conn: &Connection, message_id: i64) -> Result<Option<MessageEmbedding>> {
conn.query_row(
"SELECT id, message_id, embedding, model_name, created_at
FROM message_embeddings WHERE message_id = ?1",
params![message_id],
|row| {
let embedding_bytes: Vec<u8> = row.get(2)?;
let created_at: i64 = row.get(4)?;
Ok(MessageEmbedding {
id: row.get(0)?,
message_id: row.get(1)?,
embedding: deserialize_embedding(&embedding_bytes),
model_name: row.get(3)?,
created_at: Utc.timestamp_opt(created_at, 0).unwrap(),
})
},
)
.optional()
}
/// Insert a word embedding, or keep the existing one for an already-seen word.
///
/// Returns the `word_embeddings.id` for `word` — freshly inserted, or the
/// pre-existing row's id when `ON CONFLICT(word) DO NOTHING` made the insert
/// a no-op (the old code returned a stale `last_insert_rowid()` in that case,
/// despite the "insert or get" doc comment).
pub fn insert_word_embedding(
    conn: &Connection,
    word: &str,
    embedding: &[f32],
    model_name: &str,
) -> Result<i64> {
    let embedding_bytes = serialize_embedding(embedding);
    let created_at = Utc::now().timestamp();
    let inserted = conn.execute(
        "INSERT INTO word_embeddings (word, embedding, model_name, created_at)
         VALUES (?1, ?2, ?3, ?4)
         ON CONFLICT(word) DO NOTHING",
        params![word, embedding_bytes, model_name, created_at],
    )?;
    if inserted > 0 {
        Ok(conn.last_insert_rowid())
    } else {
        // Conflict path: return the id of the row that already holds this word.
        conn.query_row(
            "SELECT id FROM word_embeddings WHERE word = ?1",
            params![word],
            |row| row.get(0),
        )
    }
}
/// Fetch the embedding for a single word, if present.
pub fn get_word_embedding(conn: &Connection, word: &str) -> Result<Option<WordEmbedding>> {
    conn.query_row(
        "SELECT id, word, embedding, model_name, created_at
         FROM word_embeddings WHERE word = ?1",
        params![word],
        |row| {
            let embedding_bytes: Vec<u8> = row.get(2)?;
            let created_at: i64 = row.get(4)?;
            Ok(WordEmbedding {
                id: row.get(0)?,
                word: row.get(1)?,
                embedding: deserialize_embedding(&embedding_bytes),
                model_name: row.get(3)?,
                created_at: Utc.timestamp_opt(created_at, 0).unwrap(),
            })
        },
    )
    .optional()
}
/// Record an emotion classification for a message; returns the new row id.
pub fn insert_emotion(
    conn: &Connection,
    message_id: i64,
    emotion: &str,
    confidence: f64,
    model_version: &str,
) -> Result<i64> {
    let ts = Utc::now().timestamp();
    conn.execute(
        "INSERT INTO emotions (message_id, emotion, confidence, model_version, created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![message_id, emotion, confidence, model_version, ts, ts],
    )?;
    Ok(conn.last_insert_rowid())
}
/// Overwrite the emotion/confidence for a message and bump `updated_at`.
pub fn update_emotion(
    conn: &Connection,
    message_id: i64,
    emotion: &str,
    confidence: f64,
) -> Result<()> {
    let ts = Utc::now().timestamp();
    conn.execute(
        "UPDATE emotions SET emotion = ?1, confidence = ?2, updated_at = ?3
         WHERE message_id = ?4",
        params![emotion, confidence, ts, message_id],
    )
    .map(|_| ())
}
/// Look up a classification by the message it belongs to.
pub fn get_emotion_by_message_id(conn: &Connection, message_id: i64) -> Result<Option<Emotion>> {
    conn.query_row(
        "SELECT id, message_id, emotion, confidence, model_version, created_at, updated_at
         FROM emotions WHERE message_id = ?1",
        params![message_id],
        map_emotion_row,
    )
    .optional()
}
/// Look up a classification by its own primary key.
pub fn get_emotion_by_id(conn: &Connection, id: i64) -> Result<Option<Emotion>> {
    conn.query_row(
        "SELECT id, message_id, emotion, confidence, model_version, created_at, updated_at
         FROM emotions WHERE id = ?1",
        params![id],
        map_emotion_row,
    )
    .optional()
}
/// List emotions with optional filters, newest first.
///
/// Any filter left as `None` is ignored; `limit`/`offset` default to
/// unlimited/0.
///
/// The previous version appended placeholders `?1`..`?4` conditionally but
/// always bound four parameters, so every call with any absent filter failed
/// with a parameter-count mismatch. The query now always contains all four
/// placeholders: absent filters are neutralized via `IS NULL` checks (an
/// `Option` binds as SQL NULL), and `LIMIT -1` means "no limit" in SQLite.
pub fn list_emotions(
    conn: &Connection,
    emotion_filter: Option<&str>,
    min_confidence: Option<f64>,
    limit: Option<i32>,
    offset: Option<i32>,
) -> Result<Vec<Emotion>> {
    let query = "SELECT id, message_id, emotion, confidence, model_version, created_at, updated_at
         FROM emotions
         WHERE (?1 IS NULL OR emotion = ?1)
           AND (?2 IS NULL OR confidence >= ?2)
         ORDER BY created_at DESC
         LIMIT ?3 OFFSET ?4";
    let mut stmt = conn.prepare(query)?;
    let emotions = stmt
        .query_map(
            params![
                emotion_filter,
                min_confidence,
                limit.unwrap_or(-1),
                offset.unwrap_or(0),
            ],
            map_emotion_row,
        )?
        .collect::<Result<Vec<_>>>()?;
    Ok(emotions)
}
/// Delete a classification row by primary key (no error if absent).
pub fn delete_emotion(conn: &Connection, id: i64) -> Result<()> {
    conn.execute("DELETE FROM emotions WHERE id = ?1", params![id])
        .map(|_| ())
}
/// Total number of classification rows in the emotions table.
pub fn count_emotions(conn: &Connection) -> Result<i32> {
    conn.query_row("SELECT COUNT(*) FROM emotions", [], |row| row.get(0))
}
/// Map an `emotions` row (canonical column order) to an `Emotion`.
fn map_emotion_row(row: &Row) -> Result<Emotion> {
    let created_ts: i64 = row.get(5)?;
    let updated_ts: i64 = row.get(6)?;
    Ok(Emotion {
        id: row.get(0)?,
        message_id: row.get(1)?,
        emotion: row.get(2)?,
        confidence: row.get(3)?,
        model_version: row.get(4)?,
        created_at: Utc.timestamp_opt(created_ts, 0).unwrap(),
        updated_at: Utc.timestamp_opt(updated_ts, 0).unwrap(),
    })
}
/// Add a labelled sample to the emotion training set; returns the new row id.
/// `message_id` may be `None` for samples that did not originate from a
/// stored message.
pub fn insert_training_sample(
    conn: &Connection,
    message_id: Option<i64>,
    text: &str,
    expected_emotion: &str,
) -> Result<i64> {
    let ts = Utc::now().timestamp();
    conn.execute(
        "INSERT INTO emotions_training_set (message_id, text, expected_emotion, created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5)",
        params![message_id, text, expected_emotion, ts, ts],
    )?;
    Ok(conn.last_insert_rowid())
}
/// Read a value from the daemon_state key/value table.
pub fn get_state(conn: &Connection, key: &str) -> Result<Option<String>> {
    conn.query_row(
        "SELECT value FROM daemon_state WHERE key = ?1",
        params![key],
        |row| row.get(0),
    )
    .optional()
}
/// Insert-or-update a value in the daemon_state key/value table.
pub fn set_state(conn: &Connection, key: &str, value: &str) -> Result<()> {
    let ts = Utc::now().timestamp();
    conn.execute(
        "INSERT INTO daemon_state (key, value, updated_at)
         VALUES (?1, ?2, ?3)
         ON CONFLICT(key) DO UPDATE SET value = ?2, updated_at = ?3",
        params![key, value, ts],
    )
    .map(|_| ())
}
/// Last chat.db rowid the daemon finished processing; 0 when the key is
/// missing or the stored value does not parse.
pub fn get_last_processed_rowid(conn: &Connection) -> Result<i64> {
    let stored = get_state(conn, "last_processed_rowid")?;
    Ok(stored.and_then(|s| s.parse().ok()).unwrap_or(0))
}
/// Persist the chat.db high-water-mark rowid.
pub fn save_last_processed_rowid(conn: &Connection, rowid: i64) -> Result<()> {
    let value = rowid.to_string();
    set_state(conn, "last_processed_rowid", &value)
}

View File

@@ -0,0 +1,207 @@
use rusqlite::{Connection, Result};
use tracing::info;
/// Create the full application schema (idempotent: every statement uses
/// `IF NOT EXISTS`) and best-effort load the sqlite-vec extension.
///
/// Tables created: messages, message_embeddings, word_embeddings, emotions,
/// emotions_training_set, daemon_state, models — plus their lookup indexes.
///
/// NOTE(review): extension loading uses a path relative to the process CWD
/// and a macOS `.dylib`; it also requires rusqlite's `load_extension`
/// feature — confirm both for non-dev deployments.
pub fn initialize_database(conn: &Connection) -> Result<()> {
    info!("Initializing database schema");
    // Load sqlite-vec extension (macOS only)
    let vec_path = "./extensions/vec0.dylib";
    // Try to load the vector extension. Failures are logged, not propagated:
    // the rest of the schema works without vector support.
    // SAFETY-ish: `load_extension*` are `unsafe` in rusqlite because loading
    // native code can do anything; the path here is a fixed local file.
    match unsafe { conn.load_extension_enable() } {
        Ok(_) => {
            match unsafe { conn.load_extension(vec_path, None::<&str>) } {
                Ok(_) => info!("Loaded sqlite-vec extension"),
                Err(e) => info!("Could not load sqlite-vec extension: {}. Vector operations will not be available.", e),
            }
            // Re-disable extension loading regardless of the outcome above.
            let _ = unsafe { conn.load_extension_disable() };
        }
        Err(e) => info!("Extension loading not enabled: {}", e),
    }
    // messages: one row per chat.db message we have ingested.
    // `chat_db_rowid` is UNIQUE so re-ingestion is idempotent.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS messages (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            chat_db_rowid INTEGER UNIQUE NOT NULL,
            text TEXT,
            timestamp INTEGER,
            is_from_me BOOLEAN NOT NULL,
            created_at INTEGER NOT NULL
        )",
        [],
    )?;
    // Create index on chat_db_rowid for fast lookups
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_messages_chat_db_rowid ON messages(chat_db_rowid)",
        [],
    )?;
    // message_embeddings: whole-message vectors, serialized as LE f32 blobs
    // (see serialize_embedding below). Cascades away with its message.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS message_embeddings (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            message_id INTEGER NOT NULL,
            embedding BLOB NOT NULL,
            model_name TEXT NOT NULL,
            created_at INTEGER NOT NULL,
            FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
        )",
        [],
    )?;
    // Create index on message_id
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_message_embeddings_message_id ON message_embeddings(message_id)",
        [],
    )?;
    // word_embeddings: one vector per unique (lowercased) word.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS word_embeddings (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            word TEXT UNIQUE NOT NULL,
            embedding BLOB NOT NULL,
            model_name TEXT NOT NULL,
            created_at INTEGER NOT NULL
        )",
        [],
    )?;
    // Create index on word
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_word_embeddings_word ON word_embeddings(word)",
        [],
    )?;
    // emotions: classifier output per message. No UNIQUE on message_id, so
    // duplicates are possible at the schema level; callers guard against it.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS emotions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            message_id INTEGER NOT NULL,
            emotion TEXT NOT NULL,
            confidence REAL NOT NULL,
            model_version TEXT NOT NULL,
            created_at INTEGER NOT NULL,
            updated_at INTEGER NOT NULL,
            FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
        )",
        [],
    )?;
    // Create indexes for emotions
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_emotions_message_id ON emotions(message_id)",
        [],
    )?;
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_emotions_emotion ON emotions(emotion)",
        [],
    )?;
    // emotions_training_set: labelled samples for fine-tuning. message_id is
    // nullable and set NULL (not cascaded) when the source message is deleted.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS emotions_training_set (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            message_id INTEGER,
            text TEXT NOT NULL,
            expected_emotion TEXT NOT NULL,
            actual_emotion TEXT,
            confidence REAL,
            is_validated BOOLEAN NOT NULL DEFAULT 0,
            notes TEXT,
            created_at INTEGER NOT NULL,
            updated_at INTEGER NOT NULL,
            FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE SET NULL
        )",
        [],
    )?;
    // Create index on emotions_training_set
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_emotions_training_set_message_id ON emotions_training_set(message_id)",
        [],
    )?;
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_emotions_training_set_validated ON emotions_training_set(is_validated)",
        [],
    )?;
    // daemon_state: simple key/value store for daemon persistence (e.g. the
    // last-processed chat.db rowid).
    conn.execute(
        "CREATE TABLE IF NOT EXISTS daemon_state (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;
    // models: ML model files stored as blobs alongside versioning metadata.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS models (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT UNIQUE NOT NULL,
            model_type TEXT NOT NULL,
            version TEXT NOT NULL,
            file_data BLOB NOT NULL,
            metadata TEXT,
            created_at INTEGER NOT NULL,
            updated_at INTEGER NOT NULL
        )",
        [],
    )?;
    // Create index on model name and type
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_models_name ON models(name)",
        [],
    )?;
    conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_models_type ON models(model_type)",
        [],
    )?;
    info!("Database schema initialized successfully");
    Ok(())
}
/// Serialize an f32 vector into a little-endian byte blob (4 bytes/element)
/// for BLOB storage.
pub fn serialize_embedding(embedding: &[f32]) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(embedding.len() * 4);
    for value in embedding {
        bytes.extend_from_slice(&value.to_le_bytes());
    }
    bytes
}
/// Deserialize a little-endian byte blob back into an f32 vector. Trailing
/// bytes that do not form a whole 4-byte chunk are ignored.
pub fn deserialize_embedding(bytes: &[u8]) -> Vec<f32> {
    bytes
        .chunks_exact(4)
        .map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Round-trip: serialize then deserialize recovers the original values
    /// exactly (f32 -> LE bytes -> f32 is lossless).
    #[test]
    fn test_embedding_serialization() {
        let original = vec![1.0f32, 2.5, -3.7, 0.0, 100.5];
        let serialized = serialize_embedding(&original);
        // 4 bytes per f32 element.
        assert_eq!(serialized.len(), original.len() * 4);
        let deserialized = deserialize_embedding(&serialized);
        assert_eq!(original.len(), deserialized.len());
        for (a, b) in original.iter().zip(deserialized.iter()) {
            assert!((a - b).abs() < 1e-6);
        }
    }
    /// Empty input must round-trip to empty output without panicking.
    #[test]
    fn test_empty_embedding_roundtrip() {
        assert!(serialize_embedding(&[]).is_empty());
        assert!(deserialize_embedding(&[]).is_empty());
    }
}

View File

@@ -0,0 +1 @@
// Entity builders and spawners will go here

View File

@@ -0,0 +1,42 @@
use anyhow::Result;
use iroh::protocol::Router;
use iroh::Endpoint;
use iroh_gossip::api::{GossipReceiver, GossipSender};
use iroh_gossip::net::Gossip;
use iroh_gossip::proto::TopicId;
/// Initialize Iroh endpoint and gossip for the given topic.
///
/// Sequence: bind an endpoint, spawn the gossip protocol over it, register
/// the gossip ALPN on a router so inbound connections reach it, subscribe to
/// `topic_id`, and block until the swarm join completes.
///
/// NOTE(review): `bootstrap_peers` is empty, so `joined()` only resolves once
/// another peer finds us (presumably via discovery); on an isolated host this
/// can wait indefinitely — confirm that is acceptable.
///
/// Returns the endpoint, gossip handle, router, and the split sender/receiver
/// halves of the topic subscription. The router presumably must stay alive
/// for the subscription to keep receiving — verify before dropping it early.
pub async fn init_iroh_gossip(
    topic_id: TopicId,
) -> Result<(Endpoint, Gossip, Router, GossipSender, GossipReceiver)> {
    println!("Initializing Iroh endpoint...");
    // Create the Iroh endpoint
    let endpoint = Endpoint::bind().await?;
    println!("Endpoint created");
    // Build the gossip protocol
    println!("Building gossip protocol...");
    let gossip = Gossip::builder().spawn(endpoint.clone());
    // Setup the router to handle incoming connections
    println!("Setting up router...");
    let router = Router::builder(endpoint.clone())
        .accept(iroh_gossip::ALPN, gossip.clone())
        .spawn();
    // Subscribe to the topic (no bootstrap peers for now)
    println!("Subscribing to topic: {:?}", topic_id);
    let bootstrap_peers = vec![];
    let subscribe_handle = gossip.subscribe(topic_id, bootstrap_peers).await?;
    // Split into sender and receiver; receiver is mut because joined() polls it.
    let (sender, mut receiver) = subscribe_handle.split();
    // Wait for join to complete
    println!("Waiting for gossip join...");
    receiver.joined().await?;
    println!("Gossip initialized successfully");
    Ok((endpoint, gossip, router, sender, receiver))
}

96
crates/server/src/main.rs Normal file
View File

@@ -0,0 +1,96 @@
mod assets;
mod components;
mod config;
mod db;
mod entities;
mod iroh_sync;
mod models;
mod services;
mod systems;
use anyhow::{Context, Result};
use bevy::prelude::*;
use config::Config;
use iroh_gossip::proto::TopicId;
use parking_lot::Mutex;
use rusqlite::Connection;
use std::path::Path;
use std::sync::Arc;
// Re-export init function
pub use iroh_sync::init_iroh_gossip;
// Import components and systems
use components::*;
use systems::*;
/// Entry point: load config + database, then run a headless Bevy app that
/// polls chat.db and syncs messages over iroh gossip.
fn main() {
    println!("Starting server");
    // Load configuration and initialize database; a failure here is fatal.
    let (config, us_db) = match initialize_app() {
        Ok(data) => data,
        Err(e) => {
            eprintln!("Failed to initialize app: {}", e);
            return;
        }
    };
    // Derive a fixed 32-byte gossip topic id by zero-padding the literal
    // "us-sync-v1": every peer built with the same literal joins the same swarm.
    let mut topic_bytes = [0u8; 32];
    topic_bytes[..10].copy_from_slice(b"us-sync-v1");
    let topic_id = TopicId::from_bytes(topic_bytes);
    // Headless Bevy app: MinimalPlugins = schedule runner only, no window.
    // NOTE(review): `add_message` suggests Bevy's newer message API (rename of
    // events) — confirm it matches the Bevy version pinned in Cargo.toml.
    App::new()
        .add_plugins(MinimalPlugins)
        .add_message::<PublishMessageEvent>()
        .add_message::<GossipMessageReceived>()
        .insert_resource(AppConfig(config))
        .insert_resource(Database(us_db))
        .insert_resource(GossipTopic(topic_id))
        .add_systems(Startup, (setup_database, setup_gossip))
        .add_systems(
            Update,
            // Logical pipeline order; actual scheduling is decided by Bevy
            // unless these are explicitly chained.
            (
                poll_gossip_init,
                poll_chat_db,
                detect_new_messages,
                publish_to_gossip,
                receive_from_gossip,
                save_gossip_messages,
            ),
        )
        .run();
}
/// Load config.toml (writing defaults if absent), open the application
/// database, and initialize its schema. Returns the config plus the shared
/// connection handle.
fn initialize_app() -> Result<(Config, Arc<Mutex<Connection>>)> {
    let config = match Path::new("config.toml").exists() {
        true => {
            println!("Loading config from config.toml");
            Config::from_file("config.toml")?
        }
        false => {
            // First run: materialize the defaults on disk so they can be edited.
            println!("No config.toml found, using default configuration");
            let defaults = Config::default_config();
            defaults
                .save("config.toml")
                .context("Failed to save default config")?;
            println!("Saved default configuration to config.toml");
            defaults
        }
    };
    println!("Configuration loaded");
    println!(" Database: {}", config.database.path);
    println!(" Chat DB: {}", config.database.chat_db_path);
    // Initialize database
    println!("Initializing database at {}", config.database.path);
    let conn = Connection::open(&config.database.path).context("Failed to open database")?;
    db::initialize_database(&conn).context("Failed to initialize database schema")?;
    Ok((config, Arc::new(Mutex::new(conn))))
}

View File

@@ -0,0 +1,60 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
/// A message mirrored from chat.db into our own database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Our primary key (`messages.id`).
    pub id: i64,
    /// ROWID of the originating row in chat.db (unique per message).
    pub chat_db_rowid: i64,
    /// Message body; `None` when the source row had no text.
    pub text: Option<String>,
    /// When the message was sent/received, if known.
    pub timestamp: Option<DateTime<Utc>>,
    /// True when this message was sent by the local user.
    pub is_from_me: bool,
    /// When this row was ingested into our database.
    pub created_at: DateTime<Utc>,
}
/// Represents a message embedding (full-message vector).
#[derive(Debug, Clone)]
pub struct MessageEmbedding {
    pub id: i64,
    /// Foreign key into `messages`.
    pub message_id: i64,
    /// Embedding vector (stored as a little-endian f32 blob).
    pub embedding: Vec<f32>,
    /// Model that produced this vector.
    pub model_name: String,
    pub created_at: DateTime<Utc>,
}
/// Represents a single word's embedding (one row per unique word).
#[derive(Debug, Clone)]
pub struct WordEmbedding {
    pub id: i64,
    /// The (lowercased) word this vector belongs to; unique in the table.
    pub word: String,
    pub embedding: Vec<f32>,
    pub model_name: String,
    pub created_at: DateTime<Utc>,
}
/// An emotion classification result for a message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Emotion {
    pub id: i64,
    /// Foreign key into `messages`.
    pub message_id: i64,
    /// Predicted emotion label.
    pub emotion: String,
    /// Classifier confidence for the predicted label.
    pub confidence: f64,
    /// Version string of the classifier that produced this row.
    pub model_version: String,
    pub created_at: DateTime<Utc>,
    /// Bumped when the classification is corrected/overwritten.
    pub updated_at: DateTime<Utc>,
}
/// A labelled sample in the emotion training set.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmotionTrainingSample {
    pub id: i64,
    /// Source message, if the sample came from one (nulled if it is deleted).
    pub message_id: Option<i64>,
    /// The text being labelled.
    pub text: String,
    /// Ground-truth label assigned by a human/curator.
    pub expected_emotion: String,
    /// What the model predicted, when recorded.
    pub actual_emotion: Option<String>,
    /// Model confidence for `actual_emotion`, when recorded.
    pub confidence: Option<f64>,
    /// True once a human has validated this sample.
    pub is_validated: bool,
    pub notes: Option<String>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

View File

@@ -0,0 +1,72 @@
// Wire schema for the emotion CRUD service.
// All *_at timestamp fields are Unix seconds (matching the SQLite schema).
syntax = "proto3";
package emotions;
// Emotion classification for a message
message Emotion {
  int64 id = 1;
  int64 message_id = 2;
  string emotion = 3;
  double confidence = 4;
  string model_version = 5;
  int64 created_at = 6;
  int64 updated_at = 7;
}
// Request to get a single emotion by message ID
message GetEmotionRequest {
  int64 message_id = 1;
}
// Request to get multiple emotions with optional filters
message GetEmotionsRequest {
  repeated int64 message_ids = 1;
  optional string emotion_filter = 2;
  optional double min_confidence = 3;
  optional int32 limit = 4;
  optional int32 offset = 5;
}
// Response containing multiple emotions
message EmotionsResponse {
  repeated Emotion emotions = 1;
  // Server-side total; may exceed emotions.length when the query is paginated.
  int32 total_count = 2;
}
// Request to update an emotion (for corrections/fine-tuning)
message UpdateEmotionRequest {
  int64 message_id = 1;
  string emotion = 2;
  double confidence = 3;
  // Presence of notes marks the correction as a curated training sample.
  optional string notes = 4;
}
// Request to delete an emotion (by emotions.id, not message_id)
message DeleteEmotionRequest {
  int64 id = 1;
}
// Generic response for mutations
message EmotionResponse {
  bool success = 1;
  string message = 2;
  optional Emotion emotion = 3;
}
// Empty message for list all
message Empty {}
// The emotion service with full CRUD operations
service EmotionService {
  // Read operations
  rpc GetEmotion(GetEmotionRequest) returns (Emotion);
  rpc GetEmotions(GetEmotionsRequest) returns (EmotionsResponse);
  rpc ListAllEmotions(Empty) returns (EmotionsResponse);
  // Update operations (for classification corrections and fine-tuning)
  rpc UpdateEmotion(UpdateEmotionRequest) returns (EmotionResponse);
  rpc BatchUpdateEmotions(stream UpdateEmotionRequest) returns (EmotionResponse);
  // Delete operation
  rpc DeleteEmotion(DeleteEmotionRequest) returns (EmotionResponse);
}

View File

@@ -0,0 +1,121 @@
use crate::db;
use anyhow::{Context, Result};
use chrono::Utc;
use rusqlite::Connection;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{mpsc, Mutex};
use tokio::time;
use tracing::{debug, error, info, warn};
/// Polls the macOS Messages database (chat.db) on a fixed interval, persists
/// newly seen messages into our own database, and forwards them to the
/// processing pipeline over an mpsc channel.
pub struct ChatPollerService {
    /// Path to the chat.db sqlite file (opened read-only each poll).
    chat_db_path: String,
    /// Shared application database (tokio Mutex: safe to hold across awaits).
    us_db: Arc<Mutex<Connection>>,
    /// Sink feeding downstream services (embedding, emotion, ...).
    tx: mpsc::Sender<lib::Message>,
    /// Time between polls of chat.db.
    poll_interval: Duration,
}
impl ChatPollerService {
    /// Build a poller; `poll_interval_ms` is the polling period in milliseconds.
    pub fn new(
        chat_db_path: String,
        us_db: Arc<Mutex<Connection>>,
        tx: mpsc::Sender<lib::Message>,
        poll_interval_ms: u64,
    ) -> Self {
        Self {
            chat_db_path,
            us_db,
            tx,
            poll_interval: Duration::from_millis(poll_interval_ms),
        }
    }
    /// Main loop; does not return under normal operation.
    ///
    /// Resumes from the persisted `last_processed_rowid` high-water mark, then
    /// on every tick forwards newer messages downstream and persists the
    /// advanced mark. Poll errors are logged and the loop keeps running.
    pub async fn run(&self) -> Result<()> {
        info!("Starting chat poller service");
        info!("Polling {} every {:?}", self.chat_db_path, self.poll_interval);
        // Get last processed rowid from database
        let us_db = self.us_db.lock().await;
        let mut last_rowid = db::get_last_processed_rowid(&us_db)
            .context("Failed to get last processed rowid")?;
        // Release the lock before entering the long-running loop.
        drop(us_db);
        info!("Starting from rowid: {}", last_rowid);
        let mut interval = time::interval(self.poll_interval);
        loop {
            interval.tick().await;
            match self.poll_messages(last_rowid).await {
                Ok(new_messages) => {
                    if !new_messages.is_empty() {
                        info!("Found {} new messages", new_messages.len());
                        for msg in new_messages {
                            // Advance the high-water mark (results are not
                            // assumed to arrive sorted by rowid).
                            if msg.rowid > last_rowid {
                                last_rowid = msg.rowid;
                            }
                            // Send message to processing pipeline; an error means
                            // the receiver was dropped — logged, not fatal.
                            if let Err(e) = self.tx.send(msg).await {
                                error!("Failed to send message to processing pipeline: {}", e);
                            }
                        }
                        // Persist the mark so a restart resumes from here.
                        let us_db = self.us_db.lock().await;
                        if let Err(e) = db::save_last_processed_rowid(&us_db, last_rowid) {
                            warn!("Failed to save last processed rowid: {}", e);
                        }
                        drop(us_db);
                    } else {
                        debug!("No new messages");
                    }
                }
                Err(e) => {
                    // Transient failures (chat.db busy/missing) don't kill the loop.
                    error!("Error polling messages: {}", e);
                }
            }
        }
    }
    /// One poll: read recent messages from chat.db, keep those with
    /// `rowid > last_rowid`, insert them into our DB, and return them.
    ///
    /// NOTE(review): only the last 7 days are scanned, so messages older than
    /// that (e.g. after long downtime) would never be picked up — confirm this
    /// window is intended.
    async fn poll_messages(&self, last_rowid: i64) -> Result<Vec<lib::Message>> {
        // Check if chat.db exists
        if !Path::new(&self.chat_db_path).exists() {
            return Err(anyhow::anyhow!("chat.db not found at {}", self.chat_db_path));
        }
        // Open chat.db (read-only)
        let chat_db = lib::ChatDb::open(&self.chat_db_path)
            .context("Failed to open chat.db")?;
        // Get messages with rowid > last_rowid
        // We'll use the existing get_our_messages but need to filter by rowid
        // For now, let's get recent messages and filter in-memory
        let start_date = Some(Utc::now() - chrono::Duration::days(7));
        let end_date = Some(Utc::now());
        let messages = chat_db
            .get_our_messages(start_date, end_date)
            .context("Failed to get messages from chat.db")?;
        // Filter messages with rowid > last_rowid and ensure they're not duplicates
        let new_messages: Vec<lib::Message> = messages
            .into_iter()
            .filter(|msg| msg.rowid > last_rowid)
            .collect();
        // Insert new messages into our database (idempotent on chat_db_rowid).
        let us_db = self.us_db.lock().await;
        for msg in &new_messages {
            if let Err(e) = db::insert_message(&us_db, msg) {
                warn!("Failed to insert message {}: {}", msg.rowid, e);
            }
        }
        Ok(new_messages)
    }
}

View File

@@ -0,0 +1,110 @@
use crate::db;
use anyhow::Result;
use rusqlite::Connection;
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use tracing::{error, info, warn};
/// Service responsible for generating embeddings for messages and words.
///
/// Consumes messages from an mpsc channel and, for each, stores one embedding
/// for the full message plus one per unique word (already-embedded words are
/// skipped). Embedding generation is currently a placeholder returning zeros.
pub struct EmbeddingService {
    /// Shared application database.
    us_db: Arc<Mutex<Connection>>,
    /// Inbound messages to embed (fed by the chat poller).
    rx: mpsc::Receiver<lib::Message>,
    /// Name recorded alongside each embedding (and the model to load, later).
    model_name: String,
}
impl EmbeddingService {
    pub fn new(
        us_db: Arc<Mutex<Connection>>,
        rx: mpsc::Receiver<lib::Message>,
        model_name: String,
    ) -> Self {
        Self {
            us_db,
            rx,
            model_name,
        }
    }
    /// Consume the channel until all senders are dropped; per-message failures
    /// are logged and do not stop the service.
    pub async fn run(mut self) -> Result<()> {
        info!("Starting embedding service with model: {}", self.model_name);
        // TODO: Load the embedding model here
        // For now, we'll create a placeholder implementation
        info!("Loading embedding model...");
        // let model = load_embedding_model(&self.model_name)?;
        info!("Embedding model loaded (placeholder)");
        while let Some(msg) = self.rx.recv().await {
            if let Err(e) = self.process_message(&msg).await {
                error!("Error processing message {}: {}", msg.rowid, e);
            }
        }
        Ok(())
    }
    /// Embed one message: skip if unknown, already embedded, or textless;
    /// otherwise store a message-level embedding and per-word embeddings.
    async fn process_message(&self, msg: &lib::Message) -> Result<()> {
        // Get message ID from our database
        let us_db = self.us_db.lock().await;
        let message_id = match db::get_message_id_by_chat_rowid(&us_db, msg.rowid)? {
            Some(id) => id,
            None => {
                warn!("Message {} not found in database, skipping", msg.rowid);
                return Ok(());
            }
        };
        // Check if embedding already exists (makes reprocessing idempotent)
        if db::get_message_embedding(&us_db, message_id)?.is_some() {
            return Ok(());
        }
        // Skip if message has no text
        let text = match &msg.text {
            Some(t) if !t.is_empty() => t,
            _ => return Ok(()),
        };
        // Release the DB lock while running (future) model inference.
        drop(us_db);
        // Generate embedding for the full message
        // TODO: Replace with actual model inference
        let message_embedding = self.generate_embedding(text)?;
        // Store message embedding
        let us_db = self.us_db.lock().await;
        db::insert_message_embedding(&us_db, message_id, &message_embedding, &self.model_name)?;
        // Tokenize and generate word embeddings.
        // NOTE(review): the lock IS held across the per-word generate_embedding
        // calls below — harmless while it's a placeholder; revisit once real
        // inference lands.
        let words = self.tokenize(text);
        for word in words {
            // Check if word embedding exists
            if db::get_word_embedding(&us_db, &word)?.is_none() {
                // Generate embedding for word
                let word_embedding = self.generate_embedding(&word)?;
                db::insert_word_embedding(&us_db, &word, &word_embedding, &self.model_name)?;
            }
        }
        drop(us_db);
        info!("Generated embeddings for message {}", msg.rowid);
        Ok(())
    }
    /// Placeholder embedding: a 1024-dim zero vector regardless of input.
    fn generate_embedding(&self, text: &str) -> Result<Vec<f32>> {
        // TODO: Replace with actual model inference using Candle
        // For now, return a placeholder embedding of dimension 1024
        let embedding = vec![0.0f32; 1024];
        Ok(embedding)
    }
    /// Lowercasing word tokenizer: split on whitespace and ASCII punctuation,
    /// dropping empty fragments.
    /// TODO: Replace with a proper tokenizer.
    fn tokenize(&self, text: &str) -> Vec<String> {
        text.split(|c: char| c.is_whitespace() || c.is_ascii_punctuation())
            .filter(|s| !s.is_empty())
            .map(|s| s.to_lowercase())
            .collect()
    }
}

View File

@@ -0,0 +1,119 @@
use crate::db;
use anyhow::Result;
use rusqlite::Connection;
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use tracing::{error, info, warn};
/// Service responsible for classifying emotions in messages.
///
/// Consumes messages from an mpsc channel, stores one classification per
/// message, and randomly samples a fraction of results into the training set.
/// Classification is currently a placeholder ("neutral", 0.85).
pub struct EmotionService {
    /// Shared application database.
    us_db: Arc<Mutex<Connection>>,
    /// Inbound messages to classify.
    rx: mpsc::Receiver<lib::Message>,
    /// Version string recorded alongside each classification.
    model_version: String,
    /// Probability (0.0..=1.0) that a classified message is also added to the
    /// training set.
    training_sample_rate: f64,
}
impl EmotionService {
    pub fn new(
        us_db: Arc<Mutex<Connection>>,
        rx: mpsc::Receiver<lib::Message>,
        model_version: String,
        training_sample_rate: f64,
    ) -> Self {
        Self {
            us_db,
            rx,
            model_version,
            training_sample_rate,
        }
    }
    /// Consume the channel until all senders are dropped; per-message failures
    /// are logged and do not stop the service.
    pub async fn run(mut self) -> Result<()> {
        info!(
            "Starting emotion classification service with model: {}",
            self.model_version
        );
        info!(
            "Training sample rate: {:.2}%",
            self.training_sample_rate * 100.0
        );
        // TODO: Load the RoBERTa emotion classification model here
        info!("Loading RoBERTa-base-go_emotions model...");
        // let model = load_emotion_model(&self.model_version)?;
        info!("Emotion model loaded (placeholder)");
        while let Some(msg) = self.rx.recv().await {
            if let Err(e) = self.process_message(&msg).await {
                error!("Error processing message {}: {}", msg.rowid, e);
            }
        }
        Ok(())
    }
    /// Classify one message: skip if unknown, already classified, or textless;
    /// otherwise store the result and maybe sample it into the training set.
    async fn process_message(&self, msg: &lib::Message) -> Result<()> {
        // Get message ID from our database
        let us_db = self.us_db.lock().await;
        let message_id = match db::get_message_id_by_chat_rowid(&us_db, msg.rowid)? {
            Some(id) => id,
            None => {
                warn!("Message {} not found in database, skipping", msg.rowid);
                return Ok(());
            }
        };
        // Check if emotion classification already exists (idempotent reruns)
        if db::get_emotion_by_message_id(&us_db, message_id)?.is_some() {
            return Ok(());
        }
        // Skip if message has no text
        let text = match &msg.text {
            Some(t) if !t.is_empty() => t,
            _ => return Ok(()),
        };
        // Release the DB lock while running (future) model inference.
        drop(us_db);
        // Classify emotion
        // TODO: Replace with actual model inference
        let (emotion, confidence) = self.classify_emotion(text)?;
        // Store emotion classification
        let us_db = self.us_db.lock().await;
        db::insert_emotion(&us_db, message_id, &emotion, confidence, &self.model_version)?;
        // Randomly add to training set based on sample rate.
        // NOTE: rand::random is a crate dependency, not std.
        if rand::random::<f64>() < self.training_sample_rate {
            db::insert_training_sample(&us_db, Some(message_id), text, &emotion)?;
            info!(
                "Added message {} to training set (emotion: {})",
                msg.rowid, emotion
            );
        }
        drop(us_db);
        info!(
            "Classified message {} as {} (confidence: {:.2})",
            msg.rowid, emotion, confidence
        );
        Ok(())
    }
    /// Placeholder classifier: always returns ("neutral", 0.85).
    fn classify_emotion(&self, text: &str) -> Result<(String, f64)> {
        // TODO: Replace with actual RoBERTa-base-go_emotions inference using Candle
        // The model outputs probabilities for 28 emotions:
        // admiration, amusement, anger, annoyance, approval, caring, confusion,
        // curiosity, desire, disappointment, disapproval, disgust, embarrassment,
        // excitement, fear, gratitude, grief, joy, love, nervousness, optimism,
        // pride, realization, relief, remorse, sadness, surprise, neutral
        // For now, return a placeholder
        let emotion = "neutral".to_string();
        let confidence = 0.85;
        Ok((emotion, confidence))
    }
}

View File

@@ -0,0 +1,232 @@
use crate::db;
use anyhow::Result;
use rusqlite::Connection;
use std::sync::Arc;
use tokio::sync::Mutex;
use tonic::{Request, Response, Status};
use tracing::{error, info};
// Protobuf/tonic bindings for the `emotions` package, expanded at compile
// time from the build-directory output of tonic's code generation
// (presumably driven by tonic-build in build.rs — not visible from this file).
pub mod emotions {
    tonic::include_proto!("emotions");
}
use emotions::emotion_service_server::{EmotionService as EmotionServiceTrait, EmotionServiceServer};
use emotions::*;
/// Owns the listen address and database handle, and runs the tonic gRPC
/// server hosting `EmotionService`.
pub struct GrpcServer {
    /// Shared application database passed on to the service impl.
    us_db: Arc<Mutex<Connection>>,
    /// Socket address string, e.g. "0.0.0.0:50051" (parsed in `run`).
    address: String,
}
impl GrpcServer {
    /// Create a server that will serve `EmotionService` backed by `us_db`.
    pub fn new(us_db: Arc<Mutex<Connection>>, address: String) -> Self {
        Self { us_db, address }
    }
    /// Parse the configured address and serve until the server stops.
    ///
    /// # Errors
    /// Fails if the address does not parse or the transport errors out.
    pub async fn run(self) -> Result<()> {
        let addr = self.address.parse()?;
        info!("Starting gRPC server on {}", self.address);
        let service = EmotionServiceImpl {
            us_db: self.us_db.clone(),
        };
        tonic::transport::Server::builder()
            .add_service(EmotionServiceServer::new(service))
            .serve(addr)
            .await?;
        Ok(())
    }
}
/// Handler state for the gRPC service: just the shared SQLite connection.
struct EmotionServiceImpl {
    us_db: Arc<Mutex<Connection>>,
}
#[tonic::async_trait]
impl EmotionServiceTrait for EmotionServiceImpl {
    /// Fetch the classification for a single message id; NOT_FOUND if absent.
    async fn get_emotion(
        &self,
        request: Request<GetEmotionRequest>,
    ) -> Result<Response<Emotion>, Status> {
        let req = request.into_inner();
        let conn = self.us_db.lock().await;
        match db::get_emotion_by_message_id(&conn, req.message_id) {
            Ok(Some(emotion)) => Ok(Response::new(emotion_to_proto(emotion))),
            Ok(None) => Err(Status::not_found(format!(
                "Emotion not found for message_id: {}",
                req.message_id
            ))),
            Err(e) => {
                error!("Database error: {}", e);
                Err(Status::internal("Database error"))
            }
        }
    }
    /// Filtered/paginated listing. `total_count` is the table-wide row count,
    /// not the filtered count.
    ///
    /// NOTE(review): the request's `message_ids` field is currently ignored.
    async fn get_emotions(
        &self,
        request: Request<GetEmotionsRequest>,
    ) -> Result<Response<EmotionsResponse>, Status> {
        let req = request.into_inner();
        let conn = self.us_db.lock().await;
        let emotion_filter = req.emotion_filter.as_deref();
        let min_confidence = req.min_confidence;
        // proto `optional int32` already decodes as Option<i32>; the old
        // `.map(|l| l as i32)` was a no-op cast.
        let limit = req.limit;
        let offset = req.offset;
        match db::list_emotions(&conn, emotion_filter, min_confidence, limit, offset) {
            Ok(emotions) => {
                let total_count = db::count_emotions(&conn).unwrap_or(0);
                Ok(Response::new(EmotionsResponse {
                    emotions: emotions.into_iter().map(emotion_to_proto).collect(),
                    total_count,
                }))
            }
            Err(e) => {
                error!("Database error: {}", e);
                Err(Status::internal("Database error"))
            }
        }
    }
    /// Unfiltered listing; here `total_count` equals the returned length.
    async fn list_all_emotions(
        &self,
        _request: Request<Empty>,
    ) -> Result<Response<EmotionsResponse>, Status> {
        let conn = self.us_db.lock().await;
        match db::list_emotions(&conn, None, None, None, None) {
            Ok(emotions) => {
                let total_count = emotions.len() as i32;
                Ok(Response::new(EmotionsResponse {
                    emotions: emotions.into_iter().map(emotion_to_proto).collect(),
                    total_count,
                }))
            }
            Err(e) => {
                error!("Database error: {}", e);
                Err(Status::internal("Database error"))
            }
        }
    }
    /// Overwrite a classification (manual correction). When `notes` is present
    /// the corrected text/label pair is also recorded as a training sample.
    ///
    /// NOTE(review): the note text itself is not persisted —
    /// `insert_training_sample` takes no notes argument. Also, updating a
    /// message with no existing emotion row "succeeds" with zero rows changed.
    async fn update_emotion(
        &self,
        request: Request<UpdateEmotionRequest>,
    ) -> Result<Response<EmotionResponse>, Status> {
        let req = request.into_inner();
        let conn = self.us_db.lock().await;
        match db::update_emotion(&conn, req.message_id, &req.emotion, req.confidence) {
            Ok(_) => {
                // Presence of notes marks this as a curated correction worth
                // keeping for fine-tuning. (`is_some()` instead of binding an
                // unused `notes` variable.)
                if req.notes.is_some() {
                    if let Ok(Some(msg)) = db::get_message(&conn, req.message_id) {
                        if let Some(text) = msg.text {
                            let _ = db::insert_training_sample(
                                &conn,
                                Some(req.message_id),
                                &text,
                                &req.emotion,
                            );
                        }
                    }
                }
                // Echo the updated row back when we can read it.
                match db::get_emotion_by_message_id(&conn, req.message_id) {
                    Ok(Some(emotion)) => Ok(Response::new(EmotionResponse {
                        success: true,
                        message: "Emotion updated successfully".to_string(),
                        emotion: Some(emotion_to_proto(emotion)),
                    })),
                    _ => Ok(Response::new(EmotionResponse {
                        success: true,
                        message: "Emotion updated successfully".to_string(),
                        emotion: None,
                    })),
                }
            }
            Err(e) => {
                error!("Database error: {}", e);
                Err(Status::internal("Database error"))
            }
        }
    }
    /// Streamed bulk form of `update_emotion`. Per-item failures are logged
    /// and skipped; the response reports how many updates succeeded. The lock
    /// is re-acquired per item so other handlers can interleave.
    async fn batch_update_emotions(
        &self,
        request: Request<tonic::Streaming<UpdateEmotionRequest>>,
    ) -> Result<Response<EmotionResponse>, Status> {
        let mut stream = request.into_inner();
        let mut count = 0;
        while let Some(req) = stream.message().await? {
            let conn = self.us_db.lock().await;
            match db::update_emotion(&conn, req.message_id, &req.emotion, req.confidence) {
                Ok(_) => {
                    count += 1;
                    // Same notes-as-marker convention as update_emotion.
                    if req.notes.is_some() {
                        if let Ok(Some(msg)) = db::get_message(&conn, req.message_id) {
                            if let Some(text) = msg.text {
                                let _ = db::insert_training_sample(
                                    &conn,
                                    Some(req.message_id),
                                    &text,
                                    &req.emotion,
                                );
                            }
                        }
                    }
                }
                Err(e) => {
                    error!("Failed to update emotion for message {}: {}", req.message_id, e);
                }
            }
            drop(conn);
        }
        Ok(Response::new(EmotionResponse {
            success: true,
            message: format!("Updated {} emotions", count),
            emotion: None,
        }))
    }
    /// Delete a classification by its `emotions.id` (not message id).
    /// Deleting a nonexistent id still reports success (0 rows affected).
    async fn delete_emotion(
        &self,
        request: Request<DeleteEmotionRequest>,
    ) -> Result<Response<EmotionResponse>, Status> {
        let req = request.into_inner();
        let conn = self.us_db.lock().await;
        match db::delete_emotion(&conn, req.id) {
            Ok(_) => Ok(Response::new(EmotionResponse {
                success: true,
                message: format!("Emotion {} deleted successfully", req.id),
                emotion: None,
            })),
            Err(e) => {
                error!("Database error: {}", e);
                Err(Status::internal("Database error"))
            }
        }
    }
}
/// Converts a database emotion row into its protobuf representation.
/// Timestamps are converted to Unix seconds for the wire format.
fn emotion_to_proto(emotion: crate::models::Emotion) -> Emotion {
    let created_ts = emotion.created_at.timestamp();
    let updated_ts = emotion.updated_at.timestamp();
    Emotion {
        id: emotion.id,
        message_id: emotion.message_id,
        emotion: emotion.emotion,
        confidence: emotion.confidence,
        model_version: emotion.model_version,
        created_at: created_ts,
        updated_at: updated_ts,
    }
}

View File

@@ -0,0 +1,7 @@
pub mod chat_poller;
pub mod embedding_service;
pub mod emotion_service;
pub use chat_poller::ChatPollerService;
pub use embedding_service::EmbeddingService;
pub use emotion_service::EmotionService;

View File

@@ -0,0 +1,114 @@
use bevy::prelude::*;
use lib::sync::{Syncable, SyncMessage};
use crate::components::*;
/// Bevy plugin for transparent CRDT sync via gossip.
pub struct SyncPlugin;

impl Plugin for SyncPlugin {
    fn build(&self, _app: &mut App) {
        // `publish_sync_ops` / `receive_sync_ops` are generic over the synced
        // resource type, so they cannot be registered here without a concrete
        // type parameter (the original `app.add_systems(Update,
        // (publish_sync_ops, receive_sync_ops))` does not compile). Register
        // them once per resource type instead, e.g.:
        //     app.add_systems(Update, (publish_sync_ops::<MyConfig>,
        //                              receive_sync_ops::<MyConfig>));
    }
}
/// Marker trait for Bevy resources that can be synced via CRDT operations.
/// Implementors must be `Syncable` (from `lib::sync`) in addition to the
/// usual Bevy resource bounds; no extra methods are required.
pub trait SyncedResource: Resource + Syncable + Clone + Send + Sync + 'static {}
/// Queue of locally produced sync operations awaiting publication to gossip.
#[derive(Resource)]
pub struct SyncOpQueue<T: Syncable> {
    /// Operations drained each frame by `publish_sync_ops`.
    pub ops: Vec<T::Operation>,
}

// Manual impl: `#[derive(Default)]` would add a spurious `T: Default`
// bound on the generic parameter, but an empty `Vec<T::Operation>` needs
// no such bound.
impl<T: Syncable> Default for SyncOpQueue<T> {
    fn default() -> Self {
        Self { ops: Vec::new() }
    }
}

impl<T: Syncable> SyncOpQueue<T> {
    /// Enqueue an operation for the next publish pass.
    pub fn push(&mut self, op: T::Operation) {
        self.ops.push(op);
    }
}
/// System to publish queued sync operations to gossip.
///
/// Returns early when the gossip sender is not yet initialized or the queue
/// is empty; otherwise drains the queue, wrapping each operation in a sync
/// message. Actual broadcasting is still TODO.
fn publish_sync_ops<T: SyncedResource>(
    mut queue: ResMut<SyncOpQueue<T>>,
    resource: Res<T>,
    sender: Option<Res<IrohGossipSender>>,
) {
    // Gossip may still be initializing; bail out rather than unwrap.
    let Some(sender) = sender else { return };
    if queue.ops.is_empty() {
        return;
    }
    // Guard is unused until real broadcasting lands (see TODO below).
    let _sender_guard = sender.sender.lock();
    for op in queue.ops.drain(..) {
        let sync_msg = resource.create_sync_message(op);
        match sync_msg.to_bytes() {
            Ok(bytes) => {
                println!("Publishing sync operation: {} bytes", bytes.len());
                // TODO: Actually send via gossip
                // _sender_guard.broadcast(bytes)?;
            }
            Err(e) => {
                eprintln!("Failed to serialize sync operation: {}", e);
            }
        }
    }
}
/// System to receive and apply sync operations from gossip.
///
/// Currently a placeholder: it only checks that the gossip receiver resource
/// exists. Message polling and application are still TODO.
fn receive_sync_ops<T: SyncedResource>(
    _resource: ResMut<T>,
    receiver: Option<Res<IrohGossipReceiver>>,
) {
    // Nothing to do until gossip has been initialized.
    let Some(_receiver) = receiver else { return };
    // TODO: Poll the receiver for messages. For each message:
    //   1. Deserialize a SyncMessage<T::Operation>
    //   2. Apply it to the resource via `apply_sync_op(&op)`
}
/// Helper to register a synced resource on a Bevy `App`.
pub trait SyncedResourceExt {
    // Initializes sync bookkeeping (the per-type op queue) for resource
    // type `T` and returns `&mut Self` for chaining.
    fn add_synced_resource<T: SyncedResource>(&mut self) -> &mut Self;
}
impl SyncedResourceExt for App {
fn add_synced_resource<T: SyncedResource>(&mut self) -> &mut Self {
self.init_resource::<SyncOpQueue<T>>();
self
}
}
/// Example synced resource
#[cfg(test)]
mod tests {
    use super::*;
    use lib::sync::synced;

    // `#[synced]` rewrites non-skipped fields into SyncedValue wrappers and
    // generates getters/setters plus a `TestConfigOp` operation enum (see
    // the sync-macros crate).
    #[synced]
    pub struct TestConfig {
        pub value: i32,
        // NOTE(review): the macro's generated Syncable impl assumes a field
        // literally named `node_id` marked #[sync(skip)] — confirm against
        // sync-macros before renaming this field.
        #[sync(skip)]
        node_id: String,
    }

    // NOTE(review): Bevy's `Resource` is normally derived; an empty manual
    // impl only compiles if the trait has no required items — confirm.
    impl Resource for TestConfig {}
    impl SyncedResource for TestConfig {}

    #[test]
    fn test_sync_plugin() {
        let mut app = App::new();
        app.add_plugins(MinimalPlugins);
        app.add_plugins(SyncPlugin);
        app.add_synced_resource::<TestConfig>();
        // TODO: Test that operations are queued and published
    }
}

View File

@@ -0,0 +1,12 @@
use bevy::prelude::*;
use crate::components::*;
/// System: Poll chat.db for new messages using Bevy's task system
///
/// Placeholder: both parameters are intentionally unused until the polling
/// logic is moved off the tokio-based chat poller and onto Bevy's
/// AsyncComputeTaskPool.
pub fn poll_chat_db(
    _config: Res<AppConfig>,
    _db: Res<Database>,
) {
    // TODO: Use Bevy's AsyncComputeTaskPool to poll chat.db
    // This will replace the tokio::spawn chat poller
}

View File

@@ -0,0 +1,116 @@
use bevy::prelude::*;
use parking_lot::Mutex;
use std::sync::Arc;
use crate::components::*;
/// System: Poll the gossip init task and insert resources when complete.
///
/// Fix: the `GossipInitTask` resource is now removed as soon as the task
/// finishes, even when initialization yields `None` — previously a failed
/// init left the resource in place, and the next frame would poll an
/// already-completed future.
pub fn poll_gossip_init(
    mut commands: Commands,
    init_task: Option<ResMut<GossipInitTask>>,
) {
    let Some(mut task) = init_task else { return };
    // Non-blocking completion check.
    let Some(result) =
        bevy::tasks::block_on(bevy::tasks::futures_lite::future::poll_once(&mut task.0))
    else {
        return; // Still running; try again next frame.
    };
    // The future is finished: always drop the task resource so we never
    // poll a completed future again.
    commands.remove_resource::<GossipInitTask>();
    let Some((endpoint, gossip, router, sender, receiver)) = result else {
        eprintln!("Gossip initialization task completed without resources");
        return;
    };
    println!("Inserting gossip resources");
    // Insert all the resources
    commands.insert_resource(IrohEndpoint {
        endpoint,
        node_id: "TODO".to_string(), // TODO: Figure out how to get node_id in iroh 0.95
    });
    commands.insert_resource(IrohGossipHandle { gossip });
    commands.insert_resource(IrohRouter { router });
    commands.insert_resource(IrohGossipSender {
        sender: Arc::new(Mutex::new(sender)),
    });
    commands.insert_resource(IrohGossipReceiver {
        receiver: Arc::new(Mutex::new(receiver)),
    });
}
/// System: Detect new messages in SQLite that need to be published to gossip
///
/// Placeholder: `_last_synced` is intended to track the highest rowid already
/// published, so only newer rows trigger a `PublishMessageEvent` — TODO
/// confirm once the query is implemented.
pub fn detect_new_messages(
    _db: Res<Database>,
    _last_synced: Local<i64>,
    _publish_events: MessageWriter<PublishMessageEvent>,
) {
    // TODO: Query SQLite for messages with rowid > last_synced
    // When we detect new messages, we'll send PublishMessageEvent
}
/// System: Publish messages to gossip when PublishMessageEvent is triggered
///
/// Wraps each event's message in a `SyncMessage` envelope (payload +
/// timestamp + publisher node id) and serializes it. The actual gossip
/// broadcast is still TODO; for now the size is only logged.
pub fn publish_to_gossip(
    mut events: MessageReader<PublishMessageEvent>,
    sender: Option<Res<IrohGossipSender>>,
    endpoint: Option<Res<IrohEndpoint>>,
) {
    // Gossip not initialized yet, skip. `_sender` is unused until real
    // broadcasting lands, but its presence still gates publishing.
    let (Some(_sender), Some(endpoint)) = (sender, endpoint) else {
        return;
    };
    for event in events.read() {
        println!("Publishing message {} to gossip", event.message.rowid);
        // Create sync message
        let sync_message = SyncMessage {
            message: event.message.clone(),
            sync_timestamp: chrono::Utc::now().timestamp(),
            publisher_node_id: endpoint.node_id.clone(),
        };
        // Serialize the message
        match serialize_sync_message(&sync_message) {
            Ok(bytes) => {
                // TODO: Publish to gossip
                // For now, just log that we would publish
                println!("Would publish {} bytes to gossip", bytes.len());
                // Note: Direct async broadcasting from Bevy systems is tricky due to Sync requirements
                // We'll need to use a different approach, possibly with channels or a dedicated task
            }
            Err(e) => {
                eprintln!("Failed to serialize sync message: {}", e);
            }
        }
    }
}
/// System: Receive messages from gossip
///
/// Placeholder: only verifies the receiver resource exists. Proper reception
/// will need a long-running task that forwards gossip events into Bevy
/// messages.
pub fn receive_from_gossip(
    mut _gossip_events: MessageWriter<GossipMessageReceived>,
    receiver: Option<Res<IrohGossipReceiver>>,
) {
    // Skip silently until async gossip init has inserted the receiver.
    let Some(_receiver) = receiver else { return };
    // TODO: Implement proper async message reception by spawning a task that
    // listens for gossip events and emits them as Bevy messages.
}
/// System: Save received gossip messages to SQLite
///
/// Currently only logs each received message; the deduplicating SQLite
/// insert is still TODO.
pub fn save_gossip_messages(
    mut events: MessageReader<GossipMessageReceived>,
    _db: Res<Database>,
) {
    for event in events.read() {
        let msg = &event.sync_message;
        println!("Received message {} from gossip (published by {})",
            msg.message.rowid,
            msg.publisher_node_id);
        // TODO: Save to SQLite if we don't already have it
    }
}

View File

@@ -0,0 +1,7 @@
pub mod database;
pub mod gossip;
pub mod setup;
pub use database::*;
pub use gossip::*;
pub use setup::*;

View File

@@ -0,0 +1,22 @@
use bevy::prelude::*;
use bevy::tasks::AsyncComputeTaskPool;
use crate::components::*;
/// Startup system: Initialize database
///
/// Currently just confirms the `Database` resource exists; presumably schema
/// setup happens when the resource itself is constructed — TODO confirm.
pub fn setup_database(_db: Res<Database>) {
    println!("Database resource initialized");
}
/// Startup system: Initialize Iroh gossip
///
/// Currently a stub: it only logs the configured topic. The unused
/// `topic_id` local and the unused `Commands` parameter have been cleaned
/// up until the real async initialization is implemented.
pub fn setup_gossip(_commands: Commands, topic: Res<GossipTopic>) {
    println!("Setting up Iroh gossip for topic: {:?}", topic.0);
    // TODO: Initialize gossip properly
    // For now, skip async initialization due to Sync requirements in Bevy tasks
    // We'll need to use a different initialization strategy
    println!("Gossip initialization skipped (TODO: implement proper async init)");
}

View File

@@ -0,0 +1,15 @@
[package]
name = "sync-macros"
version = "0.1.0"
edition.workspace = true
[lib]
proc-macro = true
[dependencies]
syn = { version = "2.0", features = ["full"] }
quote = "1.0"
proc-macro2 = "1.0"
[dev-dependencies]
lib = { path = "../lib" }

View File

@@ -0,0 +1,345 @@
use proc_macro::TokenStream;
use quote::{quote, format_ident};
use syn::{parse_macro_input, DeriveInput, Data, Fields, Type, ItemStruct};
/// Attribute macro for transparent CRDT sync
///
/// Transforms your struct to use CRDTs internally while keeping the API simple.
///
/// # Example
/// ```
/// #[synced]
/// struct EmotionGradientConfig {
///     canvas_width: f32,  // Becomes SyncedValue<f32> internally
///     canvas_height: f32, // Auto-generates getters/setters
///
///     #[sync(skip)]
///     node_id: String,    // Not synced
/// }
///
/// // Use it like a normal struct:
/// let mut config = EmotionGradientConfig::new("node1".into());
/// config.set_canvas_width(1024.0); // Auto-generates sync operation
/// println!("Width: {}", config.canvas_width()); // Transparent access
/// ```
#[proc_macro_attribute]
pub fn synced(_attr: TokenStream, item: TokenStream) -> TokenStream {
    let input = parse_macro_input!(item as ItemStruct);
    let name = &input.ident;
    let vis = &input.vis;
    // The generated operation enum is named `<Struct>Op`.
    let op_enum_name = format_ident!("{}Op", name);
    let fields = match &input.fields {
        Fields::Named(fields) => &fields.named,
        // NOTE(review): panicking in a proc macro produces a spanless hard
        // error; syn::Error::to_compile_error would give nicer diagnostics.
        _ => panic!("synced only supports structs with named fields"),
    };
    // Accumulators for each generated piece of the expansion.
    let mut internal_fields = Vec::new(); // rewritten struct fields
    let mut field_getters = Vec::new();   // transparent accessors
    let mut field_setters = Vec::new();   // setters that emit ops
    let mut op_variants = Vec::new();     // variants of the op enum
    let mut apply_arms = Vec::new();      // match arms for apply_op
    let mut merge_code = Vec::new();      // per-field merge statements
    let mut new_params = Vec::new();      // parameters of generated new()
    let mut new_init = Vec::new();        // field initializers in new()
    for field in fields {
        let field_name = field.ident.as_ref().unwrap();
        let field_vis = &field.vis;
        let field_type = &field.ty;
        // Check if field should be skipped (`#[sync(skip)]`)
        let should_skip = field.attrs.iter().any(|attr| {
            attr.path().is_ident("sync")
                && attr
                    .parse_args::<syn::Ident>()
                    .map(|i| i == "skip")
                    .unwrap_or(false)
        });
        if should_skip {
            // Keep as-is, no wrapping
            internal_fields.push(quote! {
                #field_vis #field_name: #field_type
            });
            new_params.push(quote! { #field_name: #field_type });
            new_init.push(quote! { #field_name });
            continue;
        }
        // Wrap in SyncedValue
        internal_fields.push(quote! {
            #field_name: lib::sync::SyncedValue<#field_type>
        });
        // Generate getter
        field_getters.push(quote! {
            #field_vis fn #field_name(&self) -> &#field_type {
                self.#field_name.get()
            }
        });
        // Generate setter that returns operation
        let setter_name = format_ident!("set_{}", field_name);
        // Uppercase only the first character: `canvas_width` -> variant
        // `SetCanvas_width`. NOTE(review): underscores are preserved, so
        // variants are not strictly CamelCase — confirm that is intended.
        let op_variant = format_ident!(
            "Set{}",
            field_name
                .to_string()
                .chars()
                .enumerate()
                .map(|(i, c)| if i == 0 {
                    c.to_ascii_uppercase()
                } else {
                    c
                })
                .collect::<String>()
        );
        field_setters.push(quote! {
            #field_vis fn #setter_name(&mut self, value: #field_type) -> #op_enum_name {
                let op = #op_enum_name::#op_variant {
                    value: value.clone(),
                    timestamp: chrono::Utc::now(),
                    node_id: self.node_id().clone(),
                };
                self.#field_name.set(value, self.node_id().clone());
                op
            }
        });
        // Generate operation variant
        op_variants.push(quote! {
            #op_variant {
                value: #field_type,
                timestamp: chrono::DateTime<chrono::Utc>,
                node_id: String,
            }
        });
        // Generate apply arm (last-writer-wins application)
        apply_arms.push(quote! {
            #op_enum_name::#op_variant { value, timestamp, node_id } => {
                self.#field_name.apply_lww(value.clone(), timestamp.clone(), node_id.clone());
            }
        });
        // Generate merge code
        merge_code.push(quote! {
            self.#field_name.merge(&other.#field_name);
        });
        // Add to new() parameters
        new_params.push(quote! { #field_name: #field_type });
        // NOTE(review): this initializer references a `node_id` parameter,
        // so the generated `new()` only compiles when the struct has a
        // skipped field literally named `node_id` — same assumption as the
        // generated Syncable impl below.
        new_init.push(quote! {
            #field_name: lib::sync::SyncedValue::new(#field_name, node_id.clone())
        });
    }
    let expanded = quote! {
        /// Sync operations enum
        #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
        #[serde(tag = "type")]
        #vis enum #op_enum_name {
            #(#op_variants),*
        }
        impl #op_enum_name {
            pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
                Ok(serde_json::to_vec(self)?)
            }
            pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
                Ok(serde_json::from_slice(bytes)?)
            }
        }
        #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
        #vis struct #name {
            #(#internal_fields),*
        }
        impl #name {
            #vis fn new(#(#new_params),*) -> Self {
                Self {
                    #(#new_init),*
                }
            }
            /// Transparent field accessors
            #(#field_getters)*
            /// Field setters that generate sync operations
            #(#field_setters)*
            /// Apply a sync operation from another node
            #vis fn apply_op(&mut self, op: &#op_enum_name) {
                match op {
                    #(#apply_arms),*
                }
            }
            /// Merge state from another instance
            #vis fn merge(&mut self, other: &Self) {
                #(#merge_code)*
            }
        }
        impl lib::sync::Syncable for #name {
            type Operation = #op_enum_name;
            fn apply_sync_op(&mut self, op: &Self::Operation) {
                self.apply_op(op);
            }
            fn node_id(&self) -> &lib::sync::NodeId {
                // Assume there's a node_id field marked with #[sync(skip)]
                &self.node_id
            }
        }
    };
    TokenStream::from(expanded)
}
/// Old derive macro - kept for backwards compatibility
///
/// Unlike `#[synced]`, this derive does not rewrite the struct's fields; it
/// only generates the op enum, `apply_op`/`merge`, setters, and a `Syncable`
/// impl for an existing struct whose fields are presumably already
/// `SyncedValue`s — confirm against lib::sync before relying on it.
#[proc_macro_derive(Synced, attributes(sync))]
pub fn derive_synced(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = &input.ident;
    let op_enum_name = format_ident!("{}Op", name);
    let fields = match &input.data {
        Data::Struct(data) => match &data.fields {
            Fields::Named(fields) => &fields.named,
            _ => panic!("Synced only supports structs with named fields"),
        },
        _ => panic!("Synced only supports structs"),
    };
    let mut field_ops = Vec::new();      // op enum variants
    let mut apply_arms = Vec::new();     // match arms for apply_op
    let mut setter_methods = Vec::new(); // generated set_* methods
    let mut merge_code = Vec::new();     // per-field merge statements
    for field in fields {
        let field_name = field.ident.as_ref().unwrap();
        let field_type = &field.ty;
        // Check if field should be skipped (`#[sync(skip)]`)
        let should_skip = field.attrs.iter()
            .any(|attr| {
                attr.path().is_ident("sync") &&
                attr.parse_args::<syn::Ident>()
                    .map(|i| i == "skip")
                    .unwrap_or(false)
            });
        if should_skip {
            continue;
        }
        // Variant name uppercases only the first character (underscores kept).
        let op_variant = format_ident!("Set{}",
            field_name.to_string()
                .chars()
                .enumerate()
                .map(|(i, c)| if i == 0 { c.to_ascii_uppercase() } else { c })
                .collect::<String>()
        );
        let setter_name = format_ident!("set_{}", field_name);
        // Determine CRDT strategy based on type (currently always "lww")
        let crdt_strategy = get_crdt_strategy(field_type);
        match crdt_strategy.as_str() {
            "lww" => {
                // LWW for simple types
                field_ops.push(quote! {
                    #op_variant {
                        value: #field_type,
                        timestamp: chrono::DateTime<chrono::Utc>,
                        node_id: String,
                    }
                });
                apply_arms.push(quote! {
                    #op_enum_name::#op_variant { value, timestamp, node_id } => {
                        self.#field_name.apply_lww(value.clone(), timestamp.clone(), node_id.clone());
                    }
                });
                // NOTE(review): this setter replaces the SyncedValue
                // wholesale instead of calling `.set(...)` like `#[synced]`
                // does — confirm the two paths are meant to be equivalent.
                setter_methods.push(quote! {
                    pub fn #setter_name(&mut self, value: #field_type) -> #op_enum_name {
                        let op = #op_enum_name::#op_variant {
                            value: value.clone(),
                            timestamp: chrono::Utc::now(),
                            node_id: self.node_id().clone(),
                        };
                        self.#field_name = lib::sync::SyncedValue::new(value, self.node_id().clone());
                        op
                    }
                });
                merge_code.push(quote! {
                    self.#field_name.merge(&other.#field_name);
                });
            }
            _ => {
                // Default to LWW
                // NOTE(review): non-"lww" strategies currently generate
                // nothing at all, i.e. the field is silently not synced.
            }
        }
    }
    let expanded = quote! {
        /// Auto-generated sync operations enum
        #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
        #[serde(tag = "type")]
        pub enum #op_enum_name {
            #(#field_ops),*
        }
        impl #op_enum_name {
            pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
                Ok(serde_json::to_vec(self)?)
            }
            pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
                Ok(serde_json::from_slice(bytes)?)
            }
        }
        impl #name {
            /// Apply a sync operation from another node
            pub fn apply_op(&mut self, op: &#op_enum_name) {
                match op {
                    #(#apply_arms),*
                }
            }
            /// Merge state from another instance
            pub fn merge(&mut self, other: &Self) {
                #(#merge_code)*
            }
            /// Auto-generated setter methods that create sync ops
            #(#setter_methods)*
        }
        impl lib::sync::Syncable for #name {
            type Operation = #op_enum_name;
            fn apply_sync_op(&mut self, op: &Self::Operation) {
                self.apply_op(op);
            }
        }
    };
    TokenStream::from(expanded)
}
/// Determine CRDT strategy based on field type.
///
/// Every field currently resolves to last-writer-wins; type-based dispatch
/// (HashMap -> Map CRDT, Vec -> ORSet, ...) remains future work.
fn get_crdt_strategy(_ty: &Type) -> String {
    String::from("lww")
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,566 @@
# RFC 0002: Persistence Strategy for Battery-Efficient State Management
**Status:** Draft
**Authors:** Sienna
**Created:** 2025-11-15
**Related:** RFC 0001 (CRDT Sync Protocol)
## Abstract
This RFC defines a persistence strategy that balances data durability with battery efficiency for mobile platforms (iPad). The core challenge: Bevy runs at 60fps and generates continuous state changes, but we can't write to SQLite on every frame without destroying battery life and flash storage.
## The Problem
**Naive approach (bad)**:
```rust
fn sync_to_db_system(query: Query<&NetworkedEntity, Changed<Transform>>) {
for entity in query.iter() {
db.execute("UPDATE components SET data = ? WHERE entity_id = ?", ...)?;
// This runs 60 times per second!
// iPad battery: 💀
}
}
```
**Why this is terrible**:
- SQLite writes trigger `fsync()` syscalls (flush to physical storage)
- Each `fsync()` on iOS can take 5-20ms and drains battery significantly
- At 60fps with multiple entities, we'd be doing hundreds of disk writes per second
- Flash wear: mobile devices have limited write cycles
- User moves object around → hundreds of unnecessary writes of intermediate positions
## Requirements
1. **Survive crashes**: If the app crashes, user shouldn't lose more than a few seconds of work
2. **Battery efficient**: Minimize disk I/O, especially `fsync()` calls
3. **Flash-friendly**: Reduce write amplification on mobile storage
4. **Low latency**: Persistence shouldn't block rendering or input
5. **Recoverable**: On startup, we should be able to reconstruct recent state
## Categorizing Data by Persistence Needs
Not all data is equal. We need to categorize by how critical immediate persistence is:
### Tier 1: Critical State (Persist Immediately)
**What**: State that's hard or impossible to reconstruct if lost
- User-created entities (the fact that they exist)
- Operation log entries (for CRDT sync)
- Vector clock state (for causality tracking)
- Document metadata (name, creation time, etc.)
**Why**: These are the "source of truth" - if we lose them, data is gone
**Strategy**: Write to database within ~1 second of creation, but still batched
### Tier 2: Derived State (Defer and Batch)
**What**: State that can be reconstructed or is constantly changing
- Entity positions during drag operations
- Transform components (position, rotation, scale)
- UI state (selected items, viewport position)
- Temporary drawing strokes in progress
**Why**: These change rapidly and the intermediate states aren't valuable
**Strategy**: Batch writes, flush every 5-10 seconds or on specific events
### Tier 3: Ephemeral State (Never Persist)
**What**: State that only matters during current session
- Remote peer cursors
- Presence indicators (who's online)
- Network connection status
- Frame-rate metrics
**Why**: These are meaningless after restart
**Strategy**: Keep in-memory only (Bevy resources, not components)
## Write Strategy: The Three-Buffer System
We use a three-tier approach to minimize disk writes while maintaining durability:
### Layer 1: In-Memory Dirty Tracking (0ms latency)
Bevy change detection marks components as dirty, but we don't write immediately. Instead, we maintain a dirty set:
```rust
#[derive(Resource)]
struct DirtyEntities {
// Entities with changes not yet in write buffer
entities: HashSet<Uuid>,
components: HashMap<Uuid, HashSet<String>>, // entity → dirty component types
last_modified: HashMap<Uuid, Instant>, // when was it last changed
}
```
**Update frequency**: Every frame (cheap - just memory operations)
### Layer 2: Write Buffer (100ms-1s batching)
Periodically (every 100ms-1s), we collect dirty entities and prepare a write batch:
```rust
#[derive(Resource)]
struct WriteBuffer {
// Pending writes not yet committed to SQLite
pending_operations: Vec<PersistenceOp>,
last_flush: Instant,
}
enum PersistenceOp {
UpsertEntity { id: Uuid, data: EntityData },
UpsertComponent { entity_id: Uuid, component_type: String, data: Vec<u8> },
LogOperation { node_id: NodeId, seq: u64, op: Vec<u8> },
UpdateVectorClock { node_id: NodeId, counter: u64 },
}
```
**Update frequency**: Every 100ms-1s (configurable based on battery level)
**Strategy**: Accumulate operations in memory, then batch-write them
### Layer 3: SQLite with WAL Mode (5-10s commit interval)
Write buffer is flushed to SQLite, but we don't call `fsync()` immediately. Instead, we use WAL mode and control checkpoint timing:
```sql
-- Enable Write-Ahead Logging
PRAGMA journal_mode = WAL;
-- Don't auto-checkpoint on every transaction
PRAGMA wal_autocheckpoint = 0;
-- Synchronous = NORMAL (fsync WAL on commit, but not every write)
PRAGMA synchronous = NORMAL;
```
**Update frequency**: Manual checkpoints every 5-10 seconds (or on specific events)
## Flush Events: When to Force Persistence
Certain events require immediate persistence (within 1 second):
### 1. Entity Creation
When user creates a new entity, we need to persist its existence quickly:
- Add to write buffer immediately
- Trigger flush within 1 second
### 2. Major User Actions
Actions that represent "savepoints" in user's mental model:
- Finishing a drawing stroke (stroke start → immediate, intermediate points → batched, stroke end → flush)
- Deleting entities
- Changing document metadata
- Undo/redo operations
### 3. Application State Transitions
State changes that might precede app termination:
- App going to background (iOS `applicationWillResignActive`)
- Low memory warning
- User explicitly saving (if we have a save button)
- Switching documents/workspaces
### 4. Network Events
Sync protocol events that need persistence:
- Receiving operation log entries from peers
- Vector clock updates (every 5 operations or 5 seconds, whichever comes first)
### 5. Periodic Background Flush
Even if no major events happen:
- Flush every 10 seconds during active use
- Flush every 30 seconds when idle (no user input for >1 minute)
## Battery-Adaptive Flushing
Different flush strategies based on battery level:
```rust
fn get_flush_interval(battery_level: f32, is_charging: bool) -> Duration {
if is_charging {
Duration::from_secs(5) // Aggressive - power available
} else if battery_level > 0.5 {
Duration::from_secs(10) // Normal
} else if battery_level > 0.2 {
Duration::from_secs(30) // Conservative
} else {
Duration::from_secs(60) // Very conservative - low battery
}
}
```
**On iOS**: Use `UIDevice.current.batteryLevel` and `UIDevice.current.batteryState`
## SQLite Optimizations for Mobile
### Transaction Batching
Group multiple writes into a single transaction:
```rust
async fn flush_write_buffer(buffer: &WriteBuffer, db: &Connection) -> Result<()> {
let tx = db.transaction()?;
// All writes in one transaction
for op in &buffer.pending_operations {
match op {
PersistenceOp::UpsertEntity { id, data } => {
tx.execute("INSERT OR REPLACE INTO entities (...) VALUES (...)", ...)?;
}
PersistenceOp::UpsertComponent { entity_id, component_type, data } => {
tx.execute("INSERT OR REPLACE INTO components (...) VALUES (...)", ...)?;
}
// ...
}
}
    tx.commit()?; // Single fsync for entire batch
    Ok(())
}
```
**Impact**: 100 individual writes = 100 fsyncs. 1 transaction with 100 writes = 1 fsync.
### WAL Mode Checkpoint Control
```rust
async fn checkpoint_wal(db: &Connection) -> Result<()> {
// Manually checkpoint WAL to database file
    db.execute("PRAGMA wal_checkpoint(PASSIVE)", [])?;
    Ok(())
}
```
**PASSIVE checkpoint**: Doesn't block readers, syncs when possible
**When to checkpoint**: Every 10 seconds, or when WAL exceeds 1MB
### Index Strategy
Be selective about indexes - they increase write cost:
```sql
-- Only index what we actually query frequently
CREATE INDEX idx_components_entity ON components(entity_id);
CREATE INDEX idx_oplog_node_seq ON operation_log(node_id, sequence_number);
-- DON'T index everything just because we can
-- Every index = extra writes on every INSERT/UPDATE
```
### Page Size Optimization
```sql
-- Larger page size = fewer I/O operations for sequential writes
-- Default is 4KB, but 8KB or 16KB can be better for mobile
PRAGMA page_size = 8192;
```
**Caveat**: Must be set before database is created (or VACUUM to rebuild)
## Recovery Strategy
What happens if app crashes before flush?
### What We Lose
**Worst case**: Up to 10 seconds of component updates (positions, transforms)
**What we DON'T lose**:
- Entity existence (flushed within 1 second of creation)
- Operation log entries (flushed with vector clock updates)
- Any data from before the last checkpoint
### Recovery on Startup
```mermaid
graph TB
A[App Starts] --> B[Open SQLite]
B --> C{Check WAL file}
C -->|WAL exists| D[Recover from WAL]
C -->|No WAL| E[Load from main DB]
D --> F[Load entities from DB]
E --> F
F --> G[Load operation log]
G --> H[Rebuild vector clock]
H --> I[Connect to gossip]
I --> J[Request sync from peers]
J --> K[Fill any gaps via anti-entropy]
K --> L[Fully recovered]
```
**Key insight**: Even if we lose local state, gossip sync repairs it. Peers send us missing operations.
### Crash Detection
On startup, detect if previous session crashed:
```sql
CREATE TABLE session_state (
key TEXT PRIMARY KEY,
value TEXT
);
-- On startup, check if previous session closed cleanly
SELECT value FROM session_state WHERE key = 'clean_shutdown';
-- If not found or 'false', we crashed
-- Trigger recovery procedures
```
## Platform-Specific Concerns
### iOS / iPadOS
**Background app suspension**: iOS aggressively suspends apps. We have ~5 seconds when moving to background:
```rust
// When app moves to background:
fn handle_background_event() {
// Force immediate flush
flush_write_buffer().await?;
checkpoint_wal().await?;
// Mark clean shutdown
db.execute("INSERT OR REPLACE INTO session_state VALUES ('clean_shutdown', 'true')", [])?;
}
```
**Low Power Mode**: Detect and reduce flush frequency:
```swift
// iOS-specific detection
if ProcessInfo.processInfo.isLowPowerModeEnabled {
set_flush_interval(Duration::from_secs(60));
}
```
### Desktop (macOS/Linux/Windows)
More relaxed constraints:
- Battery life less critical on plugged-in desktops
- Can use more aggressive flush intervals (every 5 seconds)
- Larger WAL sizes acceptable (up to 10MB before checkpoint)
## Monitoring & Metrics
Track these metrics to tune persistence:
```rust
struct PersistenceMetrics {
// Write volume
total_writes: u64,
bytes_written: u64,
// Timing
flush_count: u64,
avg_flush_duration: Duration,
checkpoint_count: u64,
avg_checkpoint_duration: Duration,
// WAL health
wal_size_bytes: u64,
max_wal_size_bytes: u64,
// Recovery
crash_recovery_count: u64,
clean_shutdown_count: u64,
}
```
**Alerts**:
- Flush duration >50ms (disk might be slow or overloaded)
- WAL size >5MB (checkpoint more frequently)
- Crash recovery rate >10% (need more aggressive flushing)
## Write Coalescing: Deduplication
When the same entity is modified multiple times before flush, we only keep the latest:
```rust
fn add_to_write_buffer(op: PersistenceOp, buffer: &mut WriteBuffer) {
match op {
PersistenceOp::UpsertComponent { entity_id, component_type, data } => {
// Remove any existing pending write for this entity+component
buffer.pending_operations.retain(|existing_op| {
!matches!(existing_op,
PersistenceOp::UpsertComponent {
entity_id: e_id,
component_type: c_type,
..
} if e_id == &entity_id && c_type == &component_type
)
});
// Add the new one (latest state)
buffer.pending_operations.push(op);
}
// ...
}
}
```
**Impact**: User drags object for 5 seconds @ 60fps = 300 transform updates → coalesced to 1 write
## Persistence vs Sync: Division of Responsibility
Important distinction:
**Persistence layer** (this RFC):
- Writes to local SQLite
- Optimized for durability and battery life
- Only cares about local state survival
**Sync layer** (RFC 0001):
- Broadcasts operations via gossip
- Maintains operation log for anti-entropy
- Ensures eventual consistency across peers
**Key insight**: These operate independently. An operation can be:
1. Logged to operation log (for sync) - happens immediately
2. Applied to ECS (for rendering) - happens immediately
3. Persisted to SQLite (for durability) - happens on flush schedule
If local state is lost due to delayed flush, sync layer repairs it from peers.
## Configuration Schema
Expose configuration for tuning:
```toml
[persistence]
# Base flush interval (may be adjusted by battery level)
flush_interval_secs = 10
# Max time to defer critical writes (entity creation, etc.)
critical_flush_delay_ms = 1000
# WAL checkpoint interval
checkpoint_interval_secs = 30
# Max WAL size before forced checkpoint
max_wal_size_mb = 5
# Adaptive flushing based on battery
battery_adaptive = true
# Flush intervals per battery tier
[persistence.battery_tiers]
charging = 5
high = 10 # >50%
medium = 30 # 20-50%
low = 60 # <20%
# Platform overrides
[persistence.ios]
background_flush_timeout_secs = 5
low_power_mode_interval_secs = 60
```
## Example System Implementation
```rust
fn persistence_system(
dirty: Res<DirtyEntities>,
mut write_buffer: ResMut<WriteBuffer>,
db: Res<DatabaseConnection>,
time: Res<Time>,
battery: Res<BatteryStatus>,
query: Query<(Entity, &NetworkedEntity, &Transform, &/* other components */)>,
) {
// Step 1: Check if it's time to collect dirty entities
let flush_interval = get_flush_interval(battery.level, battery.is_charging);
if time.elapsed() - write_buffer.last_flush < flush_interval {
return; // Not time yet
}
// Step 2: Collect dirty entities into write buffer
for entity_uuid in &dirty.entities {
if let Some((entity, net_entity, transform, /* ... */)) =
query.iter().find(|(_, ne, ..)| ne.network_id == *entity_uuid)
{
// Serialize component
let transform_data = bincode::serialize(transform)?;
// Add to write buffer (coalescing happens here)
write_buffer.add(PersistenceOp::UpsertComponent {
entity_id: *entity_uuid,
component_type: "Transform".to_string(),
data: transform_data,
});
}
}
// Step 3: Flush write buffer to SQLite (async, non-blocking)
if write_buffer.pending_operations.len() > 0 {
let ops = std::mem::take(&mut write_buffer.pending_operations);
// Spawn async task to write to SQLite
spawn_blocking(move || {
flush_to_sqlite(&ops, &db)
});
write_buffer.last_flush = time.elapsed();
}
// Step 4: Clear dirty tracking (they're now in write buffer/SQLite)
dirty.entities.clear();
}
```
## Trade-offs and Decisions
### Why WAL Mode?
**Alternatives**:
- DELETE mode (traditional journaling)
- MEMORY mode (no durability)
**Decision**: WAL mode because:
- Better write concurrency (readers don't block writers)
- Fewer `fsync()` calls (only on checkpoint)
- Better crash recovery (WAL can be replayed)
### Why Not Use a Dirty Flag on Components?
We could mark components with a `#[derive(Dirty)]` flag, but:
- Bevy's `Changed<T>` already gives us change detection for free
- A separate dirty flag adds memory overhead
- We'd need to manually clear flags after persistence
**Decision**: Use Bevy's change detection + our own dirty tracking resource
### Why Not Use a Separate Persistence Thread?
We could run SQLite writes on a dedicated thread:
**Pros**: Never blocks main thread
**Cons**: More complex synchronization, harder to guarantee flush order
**Decision**: Use `spawn_blocking` from async runtime (Tokio). Simpler, good enough.
## Open Questions
1. **Write ordering**: Do we need to guarantee operation log entries are persisted before entity state? Or can they be out of order?
2. **Compression**: Should we compress component data before writing to SQLite? Trade-off: CPU vs I/O
3. **Memory limits**: On iPad with 2GB RAM, how large can the write buffer grow before we force a flush?
## Success Criteria
We'll know this is working when:
- [ ] App can run for 30 minutes with <5% battery drain attributed to persistence
- [ ] Crash recovery loses <10 seconds of work
- [ ] No perceptible frame drops during flush operations
- [ ] SQLite file size grows linearly with user data, not explosively
- [ ] WAL checkpoints complete in <100ms
## Implementation Phases
1. **Phase 1**: Basic in-memory dirty tracking + batched writes
2. **Phase 2**: WAL mode + manual checkpoint control
3. **Phase 3**: Battery-adaptive flushing
4. **Phase 4**: iOS background handling
5. **Phase 5**: Monitoring and tuning based on metrics
## References
- [SQLite WAL Mode](https://www.sqlite.org/wal.html)
- [iOS Background Execution](https://developer.apple.com/documentation/uikit/app_and_environment/scenes/preparing_your_ui_to_run_in_the_background)
- [Bevy Change Detection](https://docs.rs/bevy/latest/bevy/ecs/change_detection/)

39
docs/rfcs/README.md Normal file
View File

@@ -0,0 +1,39 @@
# RFCs
Request for Comments (RFCs) for major design decisions in the Lonni project.
## Active RFCs
- [RFC 0001: CRDT Synchronization Protocol over iroh-gossip](./0001-crdt-gossip-sync.md) - Draft
## RFC Process
1. **Draft**: Initial proposal, open for discussion
2. **Review**: Team reviews and provides feedback
3. **Accepted**: Approved for implementation
4. **Implemented**: Design has been built
5. **Superseded**: Replaced by a newer RFC
RFCs are living documents - they can be updated as we learn during implementation.
## When to Write an RFC
Write an RFC when:
- Making architectural decisions that affect multiple parts of the system
- Choosing between significantly different approaches
- Introducing new protocols or APIs
- Making breaking changes
Don't write an RFC for:
- Small bug fixes
- Minor refactors
- Isolated feature additions
- Experimental prototypes
## RFC Format
- **Narrative first**: Tell the story of why and how
- **Explain trade-offs**: What alternatives were considered?
- **API examples**: Show how it would be used (not full implementations)
- **Open questions**: What's still unclear?
- **Success criteria**: How do we know it works?

640
index.html Normal file
View File

@@ -0,0 +1,640 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>go_emotions Gradient Space - OKLab Edition</title>
<style>
body {
margin: 0;
padding: 20px;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: #1a1a1a;
color: #fff;
height: 100vh;
overflow: hidden;
}
.container {
max-width: 1600px;
margin: 0 auto;
display: grid;
grid-template-columns: 1fr 350px;
gap: 20px;
height: calc(100vh - 40px);
}
.main-area {
min-width: 0;
overflow-y: auto;
}
.controls {
background: #2a2a2a;
padding: 20px;
border-radius: 8px;
max-height: calc(100vh - 40px);
overflow-y: auto;
}
h1 {
margin-bottom: 10px;
font-size: 24px;
}
.subtitle {
margin-bottom: 20px;
color: #aaa;
font-size: 14px;
}
canvas {
display: block;
margin: 20px auto;
border: 1px solid #444;
cursor: crosshair;
touch-action: none;
background: #000;
}
canvas.dragging {
cursor: move !important;
}
canvas.hovering {
cursor: grab;
}
.info {
margin-top: 20px;
padding: 15px;
background: #2a2a2a;
border-radius: 8px;
font-family: monospace;
font-size: 13px;
}
.weights {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
gap: 5px;
margin-top: 10px;
}
.weight-item {
display: flex;
justify-content: space-between;
}
.weight-bar {
height: 4px;
background: #555;
margin-top: 2px;
}
.weight-fill {
height: 100%;
background: #4FC3F7;
}
.emotion-control {
margin-bottom: 15px;
padding: 10px;
background: #1a1a1a;
border-radius: 4px;
}
.emotion-control label {
display: block;
font-size: 12px;
margin-bottom: 5px;
text-transform: capitalize;
}
.emotion-control input[type="color"] {
width: 100%;
height: 30px;
border: none;
border-radius: 4px;
cursor: pointer;
}
.export-btn {
width: 100%;
padding: 12px;
background: #4FC3F7;
color: #000;
border: none;
border-radius: 4px;
font-weight: bold;
cursor: pointer;
font-size: 14px;
margin-bottom: 20px;
}
.export-btn:hover {
background: #6FD3FF;
}
.controls h2 {
font-size: 16px;
margin-bottom: 15px;
}
.hint {
font-size: 11px;
color: #888;
margin-top: 5px;
}
.loading-spinner {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 1000;
display: none;
}
.loading-spinner.active {
display: flex;
flex-direction: column;
align-items: center;
gap: 10px;
}
.spinner-circle {
width: 50px;
height: 50px;
border: 4px solid rgba(79, 195, 247, 0.2);
border-top-color: #4FC3F7;
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
.spinner-text {
color: #4FC3F7;
font-size: 14px;
font-weight: 500;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.loading-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.3);
z-index: 999;
display: none;
}
.loading-overlay.active {
display: block;
}
</style>
</head>
<body>
<div class="loading-overlay" id="loadingOverlay"></div>
<div class="loading-spinner" id="loadingSpinner">
<div class="spinner-circle"></div>
<div class="spinner-text">Calculating gradient...</div>
</div>
<div class="container">
<div class="main-area">
<h1>go_emotions Gradient Space - OKLab Edition</h1>
<div class="subtitle">Drag centroids to reposition emotions. Colors blend in perceptually uniform OKLab space.</div>
<canvas id="gradientCanvas" width="800" height="800"></canvas>
<div class="info">
<div>Hover to see emotion weights | Click and drag centroids to move</div>
<div id="coordinates" style="margin-top: 5px;">Position: (-, -)</div>
<div class="weights" id="weights"></div>
</div>
</div>
<div class="controls">
<button class="export-btn" onclick="exportConfiguration()">Export Configuration</button>
<h2>Emotion Colors</h2>
<div class="hint">Click to edit colors for each emotion</div>
<div id="colorControls"></div>
</div>
</div>
<script>
// OKLab color space conversion functions
// sRGB to Linear RGB
function srgbToLinear(c) {
const abs = Math.abs(c);
if (abs <= 0.04045) {
return c / 12.92;
}
return Math.sign(c) * Math.pow((abs + 0.055) / 1.055, 2.4);
}
// Linear RGB to sRGB
function linearToSrgb(c) {
const abs = Math.abs(c);
if (abs <= 0.0031308) {
return c * 12.92;
}
return Math.sign(c) * (1.055 * Math.pow(abs, 1 / 2.4) - 0.055);
}
// RGB (0-255) to OKLab
function rgbToOklab(r, g, b) {
// Normalize to 0-1
r = r / 255;
g = g / 255;
b = b / 255;
// Convert to linear RGB
r = srgbToLinear(r);
g = srgbToLinear(g);
b = srgbToLinear(b);
// Linear RGB to LMS
const l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b;
const m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b;
const s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b;
// LMS to OKLab
const l_ = Math.cbrt(l);
const m_ = Math.cbrt(m);
const s_ = Math.cbrt(s);
return {
L: 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_,
a: 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_,
b: 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_
};
}
// OKLab to RGB (0-255)
function oklabToRgb(L, a, b) {
// OKLab to LMS
const l_ = L + 0.3963377774 * a + 0.2158037573 * b;
const m_ = L - 0.1055613458 * a - 0.0638541728 * b;
const s_ = L - 0.0894841775 * a - 1.2914855480 * b;
const l = l_ * l_ * l_;
const m = m_ * m_ * m_;
const s = s_ * s_ * s_;
// LMS to linear RGB
let r = +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s;
let g = -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s;
let b_ = -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s;
// Linear RGB to sRGB
r = linearToSrgb(r);
g = linearToSrgb(g);
b_ = linearToSrgb(b_);
// Clamp and convert to 0-255
r = Math.max(0, Math.min(1, r)) * 255;
g = Math.max(0, Math.min(1, g)) * 255;
b_ = Math.max(0, Math.min(1, b_)) * 255;
return [r, g, b_];
}
const emotions = [
{ name: 'admiration', color: [255, 107, 107] },
{ name: 'amusement', color: [255, 217, 61] },
{ name: 'anger', color: [211, 47, 47] },
{ name: 'annoyance', color: [245, 124, 0] },
{ name: 'approval', color: [102, 187, 106] },
{ name: 'caring', color: [255, 182, 193] },
{ name: 'confusion', color: [156, 39, 176] },
{ name: 'curiosity', color: [79, 195, 247] },
{ name: 'desire', color: [233, 30, 99] },
{ name: 'disappointment', color: [109, 76, 65] },
{ name: 'disapproval', color: [139, 69, 19] },
{ name: 'disgust', color: [85, 139, 47] },
{ name: 'embarrassment', color: [255, 152, 0] },
{ name: 'excitement', color: [255, 241, 118] },
{ name: 'fear', color: [66, 66, 66] },
{ name: 'gratitude', color: [255, 224, 130] },
{ name: 'grief', color: [55, 71, 79] },
{ name: 'joy', color: [255, 235, 59] },
{ name: 'love', color: [255, 64, 129] },
{ name: 'nervousness', color: [126, 87, 194] },
{ name: 'optimism', color: [129, 199, 132] },
{ name: 'pride', color: [255, 213, 79] },
{ name: 'realization', color: [77, 208, 225] },
{ name: 'relief', color: [174, 213, 129] },
{ name: 'remorse', color: [186, 104, 200] },
{ name: 'sadness', color: [92, 107, 192] },
{ name: 'surprise', color: [255, 111, 0] },
{ name: 'neutral', color: [144, 164, 174] }
];
const canvas = document.getElementById('gradientCanvas');
const ctx = canvas.getContext('2d');
const width = canvas.width;
const height = canvas.height;
const centerX = width / 2;
const centerY = height / 2;
const radius = Math.min(width, height) * 0.4;
// Clear canvas to black initially
ctx.fillStyle = '#000000';
ctx.fillRect(0, 0, width, height);
// Position emotions in a circle
emotions.forEach((emotion, i) => {
const angle = (i / emotions.length) * Math.PI * 2;
emotion.x = centerX + Math.cos(angle) * radius;
emotion.y = centerY + Math.sin(angle) * radius;
});
// Dragging state
let draggedEmotion = null;
let isDragging = false;
let gradientImageData = null;
let animationFrameId = null;
let pendingUpdate = false;
// Initialize color controls
function initColorControls() {
const controlsDiv = document.getElementById('colorControls');
emotions.forEach((emotion, idx) => {
const div = document.createElement('div');
div.className = 'emotion-control';
const label = document.createElement('label');
label.textContent = emotion.name;
const input = document.createElement('input');
input.type = 'color';
input.id = `color-${idx}`;
const hexColor = `#${emotion.color.map(c => Math.round(c).toString(16).padStart(2, '0')).join('')}`;
input.value = hexColor;
const updateColor = (e) => {
const hex = e.target.value;
const r = parseInt(hex.substring(1, 3), 16);
const g = parseInt(hex.substring(3, 5), 16);
const b = parseInt(hex.substring(5, 7), 16);
emotions[idx].color = [r, g, b];
redrawGradient();
};
input.addEventListener('input', updateColor);
input.addEventListener('change', updateColor);
div.appendChild(label);
div.appendChild(input);
controlsDiv.appendChild(div);
});
}
// Loading indicator helpers
function showLoading() {
document.getElementById('loadingOverlay').classList.add('active');
document.getElementById('loadingSpinner').classList.add('active');
}
function hideLoading() {
document.getElementById('loadingOverlay').classList.remove('active');
document.getElementById('loadingSpinner').classList.remove('active');
}
// Calculate and cache the gradient
function calculateGradient() {
const imageData = ctx.createImageData(width, height);
const data = imageData.data;
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
const idx = (y * width + x) * 4;
// Calculate weights using inverse distance
let totalWeight = 0;
const weights = [];
emotions.forEach(emotion => {
const dx = x - emotion.x;
const dy = y - emotion.y;
const dist = Math.sqrt(dx * dx + dy * dy);
const weight = 1 / (Math.pow(dist, 2.5) + 1);
weights.push(weight);
totalWeight += weight;
});
// Normalize weights and blend colors in OKLab space
let L = 0, a = 0, b = 0;
weights.forEach((weight, i) => {
const normalizedWeight = weight / totalWeight;
const lab = rgbToOklab(...emotions[i].color);
L += lab.L * normalizedWeight;
a += lab.a * normalizedWeight;
b += lab.b * normalizedWeight;
});
// Convert back to RGB
const [r, g, b_] = oklabToRgb(L, a, b);
data[idx] = r;
data[idx + 1] = g;
data[idx + 2] = b_;
data[idx + 3] = 255;
}
}
gradientImageData = imageData;
}
// Redraw the entire gradient
function redrawGradient() {
showLoading();
// Use setTimeout to allow the loading spinner to render before blocking
setTimeout(() => {
calculateGradient();
renderCanvas();
hideLoading();
}, 50);
}
// Render the canvas (gradient + points)
function renderCanvas() {
ctx.putImageData(gradientImageData, 0, 0);
drawEmotionPoints();
}
// Schedule a render using requestAnimationFrame
function scheduleRender() {
if (!pendingUpdate) {
pendingUpdate = true;
requestAnimationFrame(() => {
renderCanvas();
pendingUpdate = false;
});
}
}
// Draw emotion labels and centroids
function drawEmotionPoints() {
ctx.font = '12px monospace';
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
emotions.forEach((emotion, i) => {
// Draw a larger circle at each emotion point for better dragging
ctx.fillStyle = `rgb(${emotion.color[0]}, ${emotion.color[1]}, ${emotion.color[2]})`;
ctx.strokeStyle = '#fff';
ctx.lineWidth = 2;
ctx.beginPath();
ctx.arc(emotion.x, emotion.y, 8, 0, Math.PI * 2);
ctx.fill();
ctx.stroke();
// Draw label with background
const dx = emotion.x - centerX;
const dy = emotion.y - centerY;
const angle = Math.atan2(dy, dx);
const labelRadius = Math.sqrt(dx * dx + dy * dy) + 30;
const labelX = centerX + Math.cos(angle) * labelRadius;
const labelY = centerY + Math.sin(angle) * labelRadius;
ctx.fillStyle = 'rgba(0, 0, 0, 0.7)';
const textWidth = ctx.measureText(emotion.name).width;
ctx.fillRect(labelX - textWidth/2 - 3, labelY - 8, textWidth + 6, 16);
ctx.fillStyle = '#fff';
ctx.fillText(emotion.name, labelX, labelY);
});
}
// Mouse event handlers
canvas.addEventListener('mousedown', (e) => {
const rect = canvas.getBoundingClientRect();
const x = (e.clientX - rect.left) * (canvas.width / rect.width);
const y = (e.clientY - rect.top) * (canvas.height / rect.height);
// Check if clicking on any emotion centroid (larger hit area for easier clicking)
for (const emotion of emotions) {
const dx = x - emotion.x;
const dy = y - emotion.y;
const dist = Math.sqrt(dx * dx + dy * dy);
if (dist < 25) { // Increased from 15 to 25 for easier clicking
draggedEmotion = emotion;
isDragging = true;
canvas.classList.add('dragging');
e.preventDefault();
return;
}
}
});
canvas.addEventListener('mousemove', (e) => {
const rect = canvas.getBoundingClientRect();
const x = (e.clientX - rect.left) * (canvas.width / rect.width);
const y = (e.clientY - rect.top) * (canvas.height / rect.height);
if (isDragging && draggedEmotion) {
e.preventDefault();
draggedEmotion.x = Math.max(0, Math.min(width, x));
draggedEmotion.y = Math.max(0, Math.min(height, y));
// Use requestAnimationFrame for smooth updates
scheduleRender();
} else {
// Check if hovering over any centroid
let isHovering = false;
for (const emotion of emotions) {
const dx = x - emotion.x;
const dy = y - emotion.y;
const dist = Math.sqrt(dx * dx + dy * dy);
if (dist < 25) {
isHovering = true;
break;
}
}
// Update cursor
if (isHovering) {
canvas.classList.add('hovering');
} else {
canvas.classList.remove('hovering');
}
showWeights(Math.floor(x), Math.floor(y));
}
});
canvas.addEventListener('mouseup', () => {
if (isDragging) {
// Recalculate gradient when drag ends
redrawGradient();
}
isDragging = false;
draggedEmotion = null;
canvas.classList.remove('dragging');
canvas.classList.remove('hovering');
});
canvas.addEventListener('mouseleave', () => {
if (isDragging) {
// Recalculate gradient when drag ends
redrawGradient();
}
isDragging = false;
draggedEmotion = null;
canvas.classList.remove('dragging');
canvas.classList.remove('hovering');
});
// Interactive hover/click
function showWeights(x, y) {
const coordDiv = document.getElementById('coordinates');
const weightsDiv = document.getElementById('weights');
coordDiv.textContent = `Position: (${x}, ${y})`;
// Calculate weights for this position
let totalWeight = 0;
const weights = [];
emotions.forEach(emotion => {
const dx = x - emotion.x;
const dy = y - emotion.y;
const dist = Math.sqrt(dx * dx + dy * dy);
const weight = 1 / (Math.pow(dist, 2.5) + 1);
weights.push(weight);
totalWeight += weight;
});
// Sort by weight descending
const sortedEmotions = emotions.map((e, i) => ({
name: e.name,
weight: weights[i] / totalWeight
})).sort((a, b) => b.weight - a.weight);
weightsDiv.innerHTML = sortedEmotions
.filter(e => e.weight > 0.01)
.map(e => `
<div>
<div class="weight-item">
<span>${e.name}</span>
<span>${(e.weight * 100).toFixed(1)}%</span>
</div>
<div class="weight-bar">
<div class="weight-fill" style="width: ${e.weight * 100}%"></div>
</div>
</div>
`).join('');
}
// Export configuration
function exportConfiguration() {
const config = {
colorSpace: 'oklab',
canvasSize: { width, height },
emotions: emotions.map(e => ({
name: e.name,
position: { x: e.x, y: e.y },
color: { r: e.color[0], g: e.color[1], b: e.color[2] }
})),
metadata: {
exportDate: new Date().toISOString(),
version: '1.0'
}
};
const dataStr = JSON.stringify(config, null, 2);
const dataBlob = new Blob([dataStr], { type: 'application/json' });
const url = URL.createObjectURL(dataBlob);
const link = document.createElement('a');
link.href = url;
link.download = `emotion-gradient-config-${Date.now()}.json`;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
URL.revokeObjectURL(url);
console.log('Configuration exported:', config);
}
// Initialize
initColorControls();
redrawGradient();
</script>
</body>
</html>