Files
sol/src/brain/responder.rs
Sienna Meridian Satterwhite 1058afb635 add TimeContext: 25 pre-computed time values for the model
midnight-based day boundaries (today, yesterday, 2 days ago),
week/month boundaries, rolling offsets (1h to 30d). injected
into system prompt via {time_block} and per-message via compact
time line. models no longer need to compute epoch timestamps.
2026-03-23 01:41:44 +00:00

615 lines
22 KiB
Rust

use std::sync::Arc;
use mistralai_client::v1::{
chat::{ChatMessage, ChatParams, ChatResponse, ChatResponseChoiceFinishReason},
constants::Model,
conversations::{ConversationEntry, ConversationInput, FunctionResultEntry},
error::ApiError,
tool::ToolChoice,
};
use rand::Rng;
use tokio::time::{sleep, Duration};
use tracing::{debug, error, info, warn};
use matrix_sdk::room::Room;
use opensearch::OpenSearch;
use crate::agent_ux::AgentProgress;
use crate::brain::conversation::ContextMessage;
use crate::brain::personality::Personality;
use crate::config::Config;
use crate::context::ResponseContext;
use crate::conversations::ConversationRegistry;
use crate::memory;
use crate::time_context::TimeContext;
use crate::tools::ToolRegistry;
/// Run a Mistral chat completion on a blocking thread.
///
/// The mistral client's `chat_async` holds a `std::sync::MutexGuard` across an
/// `.await` point, making the future !Send. We use the synchronous `chat()`
/// method via `spawn_blocking` instead.
pub(crate) async fn chat_blocking(
    client: &Arc<mistralai_client::v1::client::Client>,
    model: Model,
    messages: Vec<ChatMessage>,
    params: ChatParams,
) -> Result<ChatResponse, ApiError> {
    // Clone the Arc so the blocking closure can own its handle to the client.
    let owned_client = Arc::clone(client);
    let join_handle =
        tokio::task::spawn_blocking(move || owned_client.chat(model, messages, Some(params)));
    match join_handle.await {
        // The inner value is already a Result<ChatResponse, ApiError>.
        Ok(chat_result) => chat_result,
        // Task panicked or was cancelled — surface it as an ApiError.
        Err(join_err) => Err(ApiError {
            message: format!("spawn_blocking join error: {join_err}"),
        }),
    }
}
/// Generates Sol's chat replies: memory retrieval, prompt assembly,
/// Mistral calls, and tool execution for a single room/message.
pub struct Responder {
// Application configuration (delays, model names, index names, …).
config: Arc<Config>,
// Builds the system prompt / persona text.
personality: Arc<Personality>,
// Registry of callable tools exposed to the model.
tools: Arc<ToolRegistry>,
// Client used for memory queries against the memory index.
opensearch: OpenSearch,
}
impl Responder {
pub fn new(
config: Arc<Config>,
personality: Arc<Personality>,
tools: Arc<ToolRegistry>,
opensearch: OpenSearch,
) -> Self {
Self {
config,
personality,
tools,
opensearch,
}
}
/// Generate a reply via the legacy chat-completions API.
///
/// Flow: optional human-like delay → typing indicator → memory lookup →
/// system prompt plus timestamped room context → trigger message
/// (multimodal when an image data URI is attached) → tool-calling loop.
/// Tools are offered for the first `max_tool_iterations` rounds and then
/// withheld so the model is forced to answer in text.
///
/// Returns `Some(text)` with the final reply (own-name prefix stripped),
/// or `None` when the API fails, the response has no choices, the reply is
/// empty, or the tool-iteration budget is exhausted.
pub async fn generate_response(
    &self,
    context: &[ContextMessage],
    trigger_body: &str,
    trigger_sender: &str,
    room_name: &str,
    members: &[String],
    is_spontaneous: bool,
    mistral: &Arc<mistralai_client::v1::client::Client>,
    room: &Room,
    response_ctx: &ResponseContext,
    image_data_uri: Option<&str>,
) -> Option<String> {
    // Apply response delay (skip if instant_responses is enabled).
    // Delay happens BEFORE typing indicator — Sol "notices" the message first.
    if !self.config.behavior.instant_responses {
        let delay = if is_spontaneous {
            rand::thread_rng().gen_range(
                self.config.behavior.spontaneous_delay_min_ms
                    ..=self.config.behavior.spontaneous_delay_max_ms,
            )
        } else {
            rand::thread_rng().gen_range(
                self.config.behavior.response_delay_min_ms
                    ..=self.config.behavior.response_delay_max_ms,
            )
        };
        debug!(delay_ms = delay, is_spontaneous, "Applying response delay");
        sleep(Duration::from_millis(delay)).await;
    }
    // Start typing AFTER the delay — Sol has decided to respond.
    let _ = room.typing_notice(true).await;
    // Pre-response memory query.
    let memory_notes = self.load_memory_notes(response_ctx, trigger_body).await;
    let system_prompt = self.personality.build_system_prompt(
        room_name,
        members,
        memory_notes.as_deref(),
        response_ctx.is_dm,
    );
    let mut messages = vec![ChatMessage::new_system_message(&system_prompt)];
    // Add context messages with timestamps so the model has time awareness.
    for msg in context {
        let ts = chrono::DateTime::from_timestamp_millis(msg.timestamp)
            .map(|d| d.format("%H:%M").to_string())
            .unwrap_or_default();
        if msg.sender == self.config.matrix.user_id {
            // Our own prior messages become assistant turns (no timestamp prefix).
            messages.push(ChatMessage::new_assistant_message(&msg.content, None));
        } else {
            let user_msg = format!("[{}] {}: {}", ts, msg.sender, msg.content);
            messages.push(ChatMessage::new_user_message(&user_msg));
        }
    }
    // Add the triggering message (multimodal if image attached).
    if let Some(data_uri) = image_data_uri {
        use mistralai_client::v1::chat::{ContentPart, ImageUrl};
        let mut parts = vec![];
        if !trigger_body.is_empty() {
            parts.push(ContentPart::Text {
                text: format!("{trigger_sender}: {trigger_body}"),
            });
        }
        parts.push(ContentPart::ImageUrl {
            image_url: ImageUrl {
                url: data_uri.to_string(),
                detail: None,
            },
        });
        messages.push(ChatMessage::new_user_message_with_images(parts));
    } else {
        let trigger = format!("{trigger_sender}: {trigger_body}");
        messages.push(ChatMessage::new_user_message(&trigger));
    }
    let tool_defs =
        ToolRegistry::tool_definitions(self.tools.has_gitea(), self.tools.has_kratos());
    let model = Model::new(&self.config.mistral.default_model);
    let max_iterations = self.config.mistral.max_tool_iterations;
    for iteration in 0..=max_iterations {
        // On the final iteration, withhold tools so the model must answer in text.
        let params = ChatParams {
            tools: if iteration < max_iterations {
                Some(tool_defs.clone())
            } else {
                None
            },
            tool_choice: if iteration < max_iterations {
                Some(ToolChoice::Auto)
            } else {
                None
            },
            ..Default::default()
        };
        let response =
            match chat_blocking(mistral, model.clone(), messages.clone(), params).await {
                Ok(r) => r,
                Err(e) => {
                    let _ = room.typing_notice(false).await;
                    error!("Mistral chat failed: {e}");
                    return None;
                }
            };
        // Guard against an empty `choices` array instead of panicking on `[0]`.
        let Some(choice) = response.choices.first() else {
            let _ = room.typing_notice(false).await;
            warn!("Mistral response contained no choices");
            return None;
        };
        if choice.finish_reason == ChatResponseChoiceFinishReason::ToolCalls {
            if let Some(tool_calls) = &choice.message.tool_calls {
                // Echo the assistant's tool-call message back into the transcript.
                messages.push(ChatMessage::new_assistant_message(
                    &choice.message.content.text(),
                    Some(tool_calls.clone()),
                ));
                for tc in tool_calls {
                    let call_id = tc.id.as_deref().unwrap_or("unknown");
                    info!(
                        tool = tc.function.name.as_str(),
                        id = call_id,
                        args = tc.function.arguments.as_str(),
                        "Executing tool call"
                    );
                    let result = self
                        .tools
                        .execute(&tc.function.name, &tc.function.arguments, response_ctx)
                        .await;
                    // Tool failures are reported back to the model as text
                    // rather than aborting the whole response.
                    let result_str = match result {
                        Ok(s) => {
                            let preview: String = s.chars().take(500).collect();
                            info!(
                                tool = tc.function.name.as_str(),
                                id = call_id,
                                result_len = s.len(),
                                result_preview = preview.as_str(),
                                "Tool call result"
                            );
                            s
                        }
                        Err(e) => {
                            warn!(tool = tc.function.name.as_str(), "Tool failed: {e}");
                            format!("Error: {e}")
                        }
                    };
                    messages.push(ChatMessage::new_tool_message(
                        &result_str,
                        call_id,
                        Some(&tc.function.name),
                    ));
                }
                debug!(iteration, "Tool iteration complete, continuing");
                continue;
            }
        }
        // Final text response — strip own-name prefix via the shared helper
        // (keeps this path consistent with the conversations path).
        let text = strip_sol_prefix(&choice.message.content.text());
        if text.is_empty() {
            info!("Generated empty response, skipping send");
            let _ = room.typing_notice(false).await;
            return None;
        }
        let preview: String = text.chars().take(120).collect();
        let _ = room.typing_notice(false).await;
        info!(
            response_len = text.len(),
            response_preview = preview.as_str(),
            is_spontaneous,
            tool_iterations = iteration,
            "Generated response"
        );
        return Some(text);
    }
    let _ = room.typing_notice(false).await;
    warn!("Exceeded max tool iterations");
    None
}
/// Generate a response using the Mistral Conversations API.
///
/// This path routes through the ConversationRegistry for persistent state,
/// agent handoffs, and function calling with UX feedback (reactions + threads).
/// Per-message context (time line, room id/name, memory notes) is prepended to
/// the user message because agent instructions are fixed at creation time.
/// Returns `Some(text)` on success, `None` on API failure or empty output.
///
/// NOTE(review): `trigger_sender` and `image_data_uri` are accepted but never
/// read on this path — images are not forwarded to the Conversations API here.
/// Confirm whether that is intentional.
pub async fn generate_response_conversations(
&self,
trigger_body: &str,
trigger_sender: &str,
room_id: &str,
room_name: &str,
is_dm: bool,
is_spontaneous: bool,
mistral: &Arc<mistralai_client::v1::client::Client>,
room: &Room,
response_ctx: &ResponseContext,
conversation_registry: &ConversationRegistry,
image_data_uri: Option<&str>,
context_hint: Option<String>,
event_id: ruma::OwnedEventId,
) -> Option<String> {
// Apply response delay (same human-like pacing as the legacy path).
if !self.config.behavior.instant_responses {
let delay = if is_spontaneous {
rand::thread_rng().gen_range(
self.config.behavior.spontaneous_delay_min_ms
..=self.config.behavior.spontaneous_delay_max_ms,
)
} else {
rand::thread_rng().gen_range(
self.config.behavior.response_delay_min_ms
..=self.config.behavior.response_delay_max_ms,
)
};
sleep(Duration::from_millis(delay)).await;
}
let _ = room.typing_notice(true).await;
// Pre-response memory query (same as legacy path)
let memory_notes = self.load_memory_notes(response_ctx, trigger_body).await;
// Build the input message with dynamic context.
// Agent instructions are static (set at creation), so per-message context
// (timestamps, room, members, memory) is prepended to each user message.
let tc = TimeContext::now();
let mut context_header = format!(
"{}\n[room: {} ({})]",
tc.message_line(),
room_name,
room_id,
);
if let Some(ref notes) = memory_notes {
context_header.push('\n');
context_header.push_str(notes);
}
// In group rooms, tag the speaker with their matrix id; DMs leave it implicit.
let user_msg = if is_dm {
trigger_body.to_string()
} else {
format!("<{}> {}", response_ctx.matrix_user_id, trigger_body)
};
let input_text = format!("{context_header}\n{user_msg}");
let input = ConversationInput::Text(input_text);
// Send through conversation registry
let response = match conversation_registry
.send_message(room_id, input, is_dm, mistral, context_hint.as_deref())
.await
{
Ok(r) => r,
Err(e) => {
error!("Conversation API failed: {e}");
let _ = room.typing_notice(false).await;
return None;
}
};
// Check for function calls — execute locally and send results back
let function_calls = response.function_calls();
if !function_calls.is_empty() {
// Agent UX: react with 🔍 and post tool details in a thread
let mut progress = crate::agent_ux::AgentProgress::new(
room.clone(),
event_id.clone(),
);
progress.start().await;
let max_iterations = self.config.mistral.max_tool_iterations;
let mut current_response = response;
// NOTE(review): if the model still requests tools after max_iterations,
// this loop exits silently and we fall through to whatever text the last
// response carried — the legacy path logs a warning here. Confirm intended.
for iteration in 0..max_iterations {
let calls = current_response.function_calls();
if calls.is_empty() {
break;
}
let mut result_entries = Vec::new();
for fc in &calls {
let call_id = fc.tool_call_id.as_deref().unwrap_or("unknown");
info!(
tool = fc.name.as_str(),
id = call_id,
args = fc.arguments.as_str(),
"Executing tool call (conversations)"
);
// Post tool call to thread
progress
.post_step(&crate::agent_ux::AgentProgress::format_tool_call(
&fc.name,
&fc.arguments,
))
.await;
// "research" gets its own orchestrated executor; everything else
// goes through the generic tool dispatcher.
let result = if fc.name == "research" {
self.tools
.execute_research(
&fc.arguments,
response_ctx,
room,
&event_id,
0, // depth 0 — orchestrator level
)
.await
} else {
self.tools
.execute(&fc.name, &fc.arguments, response_ctx)
.await
};
// Tool failures are surfaced to the model as text, not fatal errors.
let result_str = match result {
Ok(s) => {
let preview: String = s.chars().take(500).collect();
info!(
tool = fc.name.as_str(),
id = call_id,
result_len = s.len(),
result_preview = preview.as_str(),
"Tool call result (conversations)"
);
s
}
Err(e) => {
warn!(tool = fc.name.as_str(), "Tool failed: {e}");
format!("Error: {e}")
}
};
result_entries.push(ConversationEntry::FunctionResult(FunctionResultEntry {
tool_call_id: call_id.to_string(),
result: result_str,
id: None,
object: None,
created_at: None,
completed_at: None,
}));
}
// Send function results back to conversation
current_response = match conversation_registry
.send_function_result(room_id, result_entries, mistral)
.await
{
Ok(r) => r,
Err(e) => {
error!("Failed to send function results: {e}");
let _ = room.typing_notice(false).await;
return None;
}
};
debug!(iteration, "Tool iteration complete (conversations)");
}
// Done with tool calls
progress.done().await;
// Extract final text from the last response
if let Some(text) = current_response.assistant_text() {
let text = strip_sol_prefix(&text);
if text.is_empty() {
let _ = room.typing_notice(false).await;
return None;
}
let _ = room.typing_notice(false).await;
info!(
response_len = text.len(),
"Generated response (conversations + tools)"
);
return Some(text);
}
let _ = room.typing_notice(false).await;
return None;
}
// Simple response — no tools involved
if let Some(text) = response.assistant_text() {
let text = strip_sol_prefix(&text);
if text.is_empty() {
let _ = room.typing_notice(false).await;
return None;
}
let _ = room.typing_notice(false).await;
info!(
response_len = text.len(),
is_spontaneous,
"Generated response (conversations)"
);
return Some(text);
}
let _ = room.typing_notice(false).await;
None
}
/// Fetch memory notes relevant to the trigger message.
///
/// Queries up to 5 topically relevant memories for the user; when fewer
/// than 3 match, backfills with recent memories (deduplicated by id, still
/// capped at 5). Returns the formatted notes block, or `None` when no
/// memories were found.
async fn load_memory_notes(
    &self,
    ctx: &ResponseContext,
    trigger_body: &str,
) -> Option<String> {
    let index = &self.config.opensearch.memory_index;
    let user_id = &ctx.user_id;
    // Search for topically relevant memories; treat a query error as "none".
    let mut memories = memory::store::query(&self.opensearch, index, user_id, trigger_body, 5)
        .await
        .unwrap_or_default();
    // Backfill with recent memories if we have fewer than 3.
    if memories.len() < 3 {
        let remaining = 5 - memories.len();
        if let Ok(recent) =
            memory::store::get_recent(&self.opensearch, index, user_id, remaining).await
        {
            let seen: std::collections::HashSet<String> =
                memories.iter().map(|m| m.id.clone()).collect();
            for doc in recent {
                if memories.len() >= 5 {
                    break;
                }
                if !seen.contains(&doc.id) {
                    memories.push(doc);
                }
            }
        }
    }
    if memories.is_empty() {
        None
    } else {
        let display = ctx.display_name.as_deref().unwrap_or(&ctx.matrix_user_id);
        Some(format_memory_notes(display, &memories))
    }
}
}
/// Strip "sol:" or "sol 💕:" prefixes the model sometimes adds.
///
/// Matching is case-insensitive; the input is trimmed before and after
/// the prefix is removed. Text without a known prefix is returned trimmed.
fn strip_sol_prefix(text: &str) -> String {
    let base = text.trim();
    let lowered = base.to_lowercase();
    // All prefixes start with ASCII "sol", so byte offsets in `lowered`
    // line up with byte offsets in `base`.
    ["sol:", "sol 💕:", "sol💕:"]
        .iter()
        .find(|p| lowered.starts_with(*p))
        .map(|p| base[p.len()..].trim().to_string())
        .unwrap_or_else(|| base.to_string())
}
/// Format memory documents into a notes block for the system prompt.
///
/// Produces a "## notes about {display_name}" header (with an instruction
/// not to reveal the notes) followed by one "- [category] content" line
/// per memory, all joined with newlines.
pub(crate) fn format_memory_notes(
    display_name: &str,
    memories: &[memory::schema::MemoryDocument],
) -> String {
    let header = format!(
        "## notes about {display_name}\n\n\
         these are your private notes about the person you're talking to.\n\
         use them to inform your responses but don't mention that you have notes.\n"
    );
    let mut lines = vec![header];
    lines.extend(
        memories
            .iter()
            .map(|mem| format!("- [{}] {}", mem.category, mem.content)),
    );
    lines.join("\n")
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::memory::schema::MemoryDocument;

    /// Build a MemoryDocument fixture with fixed user/timestamps for assertions.
    fn mem_fixture(id: &str, content: &str, category: &str) -> MemoryDocument {
        MemoryDocument {
            id: id.into(),
            user_id: "sienna@sunbeam.pt".into(),
            content: content.into(),
            category: category.into(),
            created_at: 1710000000000,
            updated_at: 1710000000000,
            source: "auto".into(),
        }
    }

    #[test]
    fn test_format_memory_notes_basic() {
        let memories = [
            mem_fixture("a", "prefers terse answers", "preference"),
            mem_fixture("b", "working on drive UI", "fact"),
        ];
        let notes = format_memory_notes("sienna", &memories);
        for expected in [
            "## notes about sienna",
            "don't mention that you have notes",
            "- [preference] prefers terse answers",
            "- [fact] working on drive UI",
        ] {
            assert!(notes.contains(expected), "missing: {expected}");
        }
    }

    #[test]
    fn test_format_memory_notes_single() {
        let memories = [mem_fixture("x", "birthday is march 12", "context")];
        let notes = format_memory_notes("lonni", &memories);
        assert!(notes.contains("## notes about lonni"));
        assert!(notes.contains("- [context] birthday is march 12"));
    }

    #[test]
    fn test_format_memory_notes_uses_display_name() {
        let memories = [mem_fixture("a", "test", "general")];
        let notes = format_memory_notes("Amber", &memories);
        assert!(notes.contains("## notes about Amber"));
    }
}