honestly fixed so much and forgot to commit

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-28 17:39:27 +00:00
parent 28909e8b76
commit e890b0213a
47 changed files with 2248 additions and 438 deletions

View File

@@ -46,4 +46,7 @@ pub enum EngineCommand {
// Clock
TickClock,
// Lifecycle
Shutdown,
}

View File

@@ -44,13 +44,19 @@ impl EngineCore {
// Process commands as they arrive
while let Some(cmd) = self.handle.command_rx.recv().await {
self.handle_command(cmd).await;
let should_continue = self.handle_command(cmd).await;
if !should_continue {
tracing::info!("EngineCore received shutdown command");
break;
}
}
tracing::info!("EngineCore shutting down (command channel closed)");
tracing::info!("EngineCore shutting down");
}
async fn handle_command(&mut self, cmd: EngineCommand) {
/// Handle a command from Bevy
/// Returns true to continue running, false to shutdown
async fn handle_command(&mut self, cmd: EngineCommand) -> bool {
match cmd {
EngineCommand::StartNetworking { session_id } => {
self.start_networking(session_id).await;
@@ -74,11 +80,16 @@ impl EngineCore {
EngineCommand::TickClock => {
self.tick_clock();
}
EngineCommand::Shutdown => {
tracing::info!("Shutdown command received");
return false;
}
// TODO: Handle CRDT and lock commands in Phase 2
_ => {
tracing::debug!("Unhandled command: {:?}", cmd);
}
}
true
}
fn tick_clock(&mut self) {
@@ -98,6 +109,25 @@ impl EngineCore {
tracing::info!("Starting networking initialization for session {}", session_id.to_code());
// Test mode: Skip actual networking and send event immediately
#[cfg(feature = "fast_tests")]
{
let bridge = crate::networking::GossipBridge::new(self.node_id);
let _ = self.handle.event_tx.send(EngineEvent::NetworkingStarted {
session_id: session_id.clone(),
node_id: self.node_id,
bridge,
});
tracing::info!("Networking started (test mode) for session {}", session_id.to_code());
// Create a dummy task that just waits
let task = tokio::spawn(async {
tokio::time::sleep(tokio::time::Duration::from_secs(3600)).await;
});
self.networking_task = Some(task);
return;
}
// Create cancellation token for graceful shutdown
let cancel_token = CancellationToken::new();
let cancel_token_clone = cancel_token.clone();
@@ -105,10 +135,10 @@ impl EngineCore {
// Spawn NetworkingManager initialization in background to avoid blocking
// DHT peer discovery can take 15+ seconds with retries
let event_tx = self.handle.event_tx.clone();
// Create channel for progress updates
let (progress_tx, mut progress_rx) = tokio::sync::mpsc::unbounded_channel();
// Spawn task to forward progress updates to Bevy
let event_tx_clone = event_tx.clone();
let session_id_clone = session_id.clone();
@@ -120,7 +150,7 @@ impl EngineCore {
});
}
});
let task = tokio::spawn(async move {
match NetworkingManager::new(session_id.clone(), Some(progress_tx), cancel_token_clone.clone()).await {
Ok((net_manager, bridge)) => {

View File

@@ -4,9 +4,29 @@ use crate::networking::{NodeId, SessionId, VectorClock};
use bevy::prelude::*;
use uuid::Uuid;
/// Progress milestones emitted while the networking stack is starting up.
///
/// Sent to Bevy inside `EngineEvent::NetworkingInitializing` (see the
/// `EngineEvent` enum below) so the UI can display initialization progress.
#[derive(Debug, Clone)]
pub enum NetworkingInitStatus {
    /// Creating the network endpoint.
    CreatingEndpoint,
    /// The endpoint is up and ready.
    EndpointReady,
    /// Querying the DHT for peers in this session.
    /// `attempt` is the retry attempt number; DHT discovery can take
    /// multiple retries (15+ seconds per the core init comments).
    DiscoveringPeers {
        session_code: String,
        attempt: u8,
    },
    /// DHT discovery returned `count` peers.
    PeersFound {
        count: usize,
    },
    /// DHT discovery returned no peers for this session.
    NoPeersFound,
    /// Publishing our own endpoint record to the DHT.
    PublishingToDHT,
    /// Initializing the gossip layer.
    InitializingGossip,
}
#[derive(Debug, Clone)]
pub enum EngineEvent {
// Networking status
NetworkingInitializing {
session_id: SessionId,
status: NetworkingInitStatus,
},
NetworkingStarted {
session_id: SessionId,
node_id: NodeId,

View File

@@ -14,12 +14,13 @@ mod core;
mod events;
mod game_actions;
mod networking;
mod peer_discovery;
mod persistence;
pub use bridge::{EngineBridge, EngineHandle};
pub use commands::EngineCommand;
pub use core::EngineCore;
pub use events::EngineEvent;
pub use events::{EngineEvent, NetworkingInitStatus};
pub use game_actions::GameAction;
pub use networking::NetworkingManager;
pub use persistence::PersistenceManager;

View File

@@ -249,9 +249,31 @@ impl NetworkingManager {
}
Event::NeighborUp(peer) => {
tracing::info!("Peer connected: {}", peer);
// Convert PublicKey to NodeId for Bevy
let peer_bytes = peer.as_bytes();
let mut node_id_bytes = [0u8; 16];
node_id_bytes.copy_from_slice(&peer_bytes[..16]);
let peer_node_id = NodeId::from_bytes(node_id_bytes);
// Notify Bevy of peer join
let _ = event_tx.send(EngineEvent::PeerJoined {
node_id: peer_node_id,
});
}
Event::NeighborDown(peer) => {
tracing::warn!("Peer disconnected: {}", peer);
// Convert PublicKey to NodeId for Bevy
let peer_bytes = peer.as_bytes();
let mut node_id_bytes = [0u8; 16];
node_id_bytes.copy_from_slice(&peer_bytes[..16]);
let peer_node_id = NodeId::from_bytes(node_id_bytes);
// Notify Bevy of peer leave
let _ = event_tx.send(EngineEvent::PeerLeft {
node_id: peer_node_id,
});
}
Event::Lagged => {
tracing::warn!("Event stream lagged");

View File

@@ -0,0 +1,151 @@
//! DHT-based peer discovery for session collaboration
//!
//! Each peer publishes their EndpointId to the DHT using a session-derived pkarr key.
//! Other peers query the DHT to discover all peers in the session.
use anyhow::Result;
use iroh::EndpointId;
use std::time::Duration;
use crate::networking::SessionId;
/// Publish our endpoint to the DHT under the session's pkarr key.
///
/// The DHT holds one signed packet per key, so publishing *replaces* the
/// previous record. To avoid erasing other peers we first resolve the
/// current packet, keep every peer it lists (deduplicated, minus
/// ourselves), and republish the merged set with our own endpoint appended.
///
/// NOTE(review): resolve-then-publish is not atomic; two peers publishing
/// concurrently can still drop each other's entry until the next
/// republish cycle (see `maintain_dht_presence`).
///
/// # Errors
/// Returns an error if building a TXT record, signing the packet, or
/// publishing to the DHT fails.
pub async fn publish_peer_to_dht(
    session_id: &SessionId,
    our_endpoint_id: EndpointId,
    dht_client: &pkarr::Client,
) -> Result<()> {
    use pkarr::dns::{self, rdata};
    use pkarr::dns::rdata::RData;

    let keypair = session_id.to_pkarr_keypair();
    let public_key = keypair.public_key();

    /// Decode a `peer=<hex_endpoint_id>` TXT payload into an `EndpointId`.
    /// Returns `None` for anything malformed (wrong prefix, bad hex,
    /// wrong length, invalid key bytes).
    fn parse_peer_txt(txt_str: &str) -> Option<EndpointId> {
        let hex_str = txt_str.strip_prefix("peer=")?;
        let bytes = hex::decode(hex_str).ok()?;
        let id_bytes: [u8; 32] = bytes.try_into().ok()?;
        EndpointId::from_bytes(&id_bytes).ok()
    }

    // Collect the peers currently registered for this session, skipping
    // ourselves (we re-add ourselves below) and any duplicate records so
    // the republished packet stays minimal.
    let mut peers: Vec<EndpointId> = Vec::new();
    if let Some(packet) = dht_client.resolve(&public_key).await {
        for rr in packet.all_resource_records() {
            if let RData::TXT(txt) = &rr.rdata {
                if let Ok(txt_str) = String::try_from(txt.clone()) {
                    if let Some(endpoint_id) = parse_peer_txt(&txt_str) {
                        if endpoint_id != our_endpoint_id && !peers.contains(&endpoint_id) {
                            peers.push(endpoint_id);
                        }
                    }
                }
            }
        }
    }

    // Rebuild the packet: one `_peers` TXT record per existing peer, plus
    // one for ourselves.
    let name = dns::Name::new("_peers").expect("constant");
    let mut builder = pkarr::SignedPacket::builder();
    peers.push(our_endpoint_id);
    for peer in peers {
        let peer_str = format!("peer={}", hex::encode(peer.as_bytes()));
        let mut txt = rdata::TXT::new();
        txt.add_string(&peer_str)?;
        builder = builder.txt(name.clone(), txt.into_owned(), 3600);
    }

    // Sign with the session-derived keypair and publish to the DHT.
    let signed_packet = builder.build(&keypair)?;
    dht_client.publish(&signed_packet, None).await?;

    tracing::info!(
        "Published peer {} to DHT for session {}",
        our_endpoint_id.fmt_short(),
        session_id.to_code()
    );
    Ok(())
}
/// Look up every peer registered in the DHT for `session_id`.
///
/// Resolves the session's pkarr record and decodes each
/// `peer=<hex_endpoint_id>` TXT entry into an [`EndpointId`]. Malformed
/// entries are silently skipped. Returns an empty list when no record
/// exists for the session key.
///
/// # Errors
/// Currently always returns `Ok`; the `Result` is kept for future
/// fallible resolution paths.
pub async fn discover_peers_from_dht(
    session_id: &SessionId,
    dht_client: &pkarr::Client,
) -> Result<Vec<EndpointId>> {
    use pkarr::dns::rdata::RData;

    let keypair = session_id.to_pkarr_keypair();
    let public_key = keypair.public_key();

    // No record under the session key means nobody has published yet.
    let Some(signed_packet) = dht_client.resolve(&public_key).await else {
        tracing::debug!("No peers found in DHT for session {}", session_id.to_code());
        return Ok(vec![]);
    };

    // Walk the TXT records, decoding every well-formed peer entry.
    let mut discovered = Vec::new();
    for record in signed_packet.all_resource_records() {
        let RData::TXT(txt) = &record.rdata else { continue };
        let Ok(text) = String::try_from(txt.clone()) else { continue };
        let Some(encoded) = text.strip_prefix("peer=") else { continue };
        let Ok(raw) = hex::decode(encoded) else { continue };
        // Exactly 32 bytes are required for an endpoint id.
        let Ok(id_bytes) = <[u8; 32]>::try_from(raw) else { continue };
        if let Ok(endpoint_id) = EndpointId::from_bytes(&id_bytes) {
            discovered.push(endpoint_id);
        }
    }

    tracing::info!(
        "Discovered {} peers from DHT for session {}",
        discovered.len(),
        session_id.to_code()
    );
    Ok(discovered)
}
/// Periodically republishes our presence to the DHT
///
/// Should be called in a background task to maintain our DHT presence.
/// Republishes every 30 minutes (well before the 1-hour TTL expires).
pub async fn maintain_dht_presence(
session_id: SessionId,
our_endpoint_id: EndpointId,
dht_client: pkarr::Client,
) {
let mut interval = tokio::time::interval(Duration::from_secs(30 * 60)); // 30 minutes
loop {
interval.tick().await;
if let Err(e) = publish_peer_to_dht(&session_id, our_endpoint_id, &dht_client).await {
tracing::warn!("Failed to republish to DHT: {}", e);
}
}
}