feat: per-user auto-memory with ResponseContext

Three memory channels: hidden tool (sol.memory.set/get in scripts),
pre-response injection (relevant memories loaded into system prompt),
and post-response extraction (ministral-3b extracts facts after each
response). User isolation enforced at Rust level — user_id derived
from Matrix sender, never from script arguments.

New modules: context (ResponseContext), memory (schema, store, extractor).
ResponseContext threaded through responder → tools → script runtime.
OpenSearch index sol_user_memory created on startup alongside archive.
This commit is contained in:
2026-03-21 15:51:31 +00:00
parent 4dc20bee23
commit 4949e70ecc
23 changed files with 4494 additions and 124 deletions

View File

@@ -5,7 +5,7 @@ use mistralai_client::v1::{
constants::Model,
};
use regex::Regex;
use tracing::{debug, warn};
use tracing::{debug, info, warn};
use crate::config::Config;
@@ -13,6 +13,7 @@ use crate::config::Config;
pub enum Engagement {
MustRespond { reason: MustRespondReason },
MaybeRespond { relevance: f32, hook: String },
React { emoji: String, relevance: f32 },
Ignore,
}
@@ -33,7 +34,9 @@ impl Evaluator {
// todo(sienna): regex must be configurable
pub fn new(config: Arc<Config>) -> Self {
let user_id = &config.matrix.user_id;
let mention_pattern = regex::escape(user_id);
// Match both plain @sol:sunbeam.pt and Matrix link format [sol](https://matrix.to/#/@sol:sunbeam.pt)
let escaped = regex::escape(user_id);
let mention_pattern = format!(r"{}|matrix\.to/#/{}", escaped, escaped);
let mention_regex = Regex::new(&mention_pattern).expect("Failed to compile mention regex");
let name_regex =
Regex::new(r"(?i)(?:^|\bhey\s+)\bsol\b").expect("Failed to compile name regex");
@@ -53,13 +56,17 @@ impl Evaluator {
recent_messages: &[String],
mistral: &Arc<mistralai_client::v1::client::Client>,
) -> Engagement {
let body_preview: String = body.chars().take(80).collect();
// Don't respond to ourselves
if sender == self.config.matrix.user_id {
debug!(sender, body = body_preview.as_str(), "Ignoring own message");
return Engagement::Ignore;
}
// Direct mention: @sol:sunbeam.pt
if self.mention_regex.is_match(body) {
info!(sender, body = body_preview.as_str(), rule = "direct_mention", "Engagement: MustRespond");
return Engagement::MustRespond {
reason: MustRespondReason::DirectMention,
};
@@ -67,6 +74,7 @@ impl Evaluator {
// DM
if is_dm {
info!(sender, body = body_preview.as_str(), rule = "dm", "Engagement: MustRespond");
return Engagement::MustRespond {
reason: MustRespondReason::DirectMessage,
};
@@ -74,11 +82,22 @@ impl Evaluator {
// Name invocation: "sol ..." or "hey sol ..."
if self.name_regex.is_match(body) {
info!(sender, body = body_preview.as_str(), rule = "name_invocation", "Engagement: MustRespond");
return Engagement::MustRespond {
reason: MustRespondReason::NameInvocation,
};
}
info!(
sender, body = body_preview.as_str(),
threshold = self.config.behavior.spontaneous_threshold,
model = self.config.mistral.evaluation_model.as_str(),
context_len = recent_messages.len(),
eval_window = self.config.behavior.evaluation_context_window,
detect_sol = self.config.behavior.detect_sol_in_conversation,
"No rule match — running LLM relevance evaluation"
);
// Cheap evaluation call for spontaneous responses
self.evaluate_relevance(body, recent_messages, mistral)
.await
@@ -119,23 +138,56 @@ impl Evaluator {
recent_messages: &[String],
mistral: &Arc<mistralai_client::v1::client::Client>,
) -> Engagement {
let window = self.config.behavior.evaluation_context_window;
let context = recent_messages
.iter()
.rev()
.take(5) //todo(sienna): must be configurable
.take(window)
.rev()
.cloned()
.collect::<Vec<_>>()
.join("\n");
// Check if Sol recently participated in this conversation
let sol_in_context = self.config.behavior.detect_sol_in_conversation
&& recent_messages.iter().any(|m| {
let lower = m.to_lowercase();
lower.starts_with("sol:") || lower.starts_with("sol ") || lower.contains("@sol:")
});
let default_active = "Sol is ALREADY part of this conversation (see messages above from Sol). \
Messages that follow up on Sol's response, ask Sol a question, or continue \
a thread Sol is in should score HIGH (0.8+). Sol should respond to follow-ups \
directed at them even if not mentioned by name.".to_string();
let default_passive = "Sol has NOT spoken in this conversation yet. Only score high if the message \
is clearly relevant to Sol's expertise (archive search, finding past conversations, \
information retrieval) or touches a topic Sol has genuine insight on.".to_string();
let participation_note = if sol_in_context {
self.config.behavior.evaluation_prompt_active.as_deref()
.unwrap_or(&default_active)
} else {
self.config.behavior.evaluation_prompt_passive.as_deref()
.unwrap_or(&default_passive)
};
info!(
sol_in_context,
context_window = window,
"Building evaluation prompt"
);
let prompt = format!(
"You are evaluating whether a virtual librarian named Sol should spontaneously join \
a conversation. Sol has deep knowledge of the group's message archive and helps \
people find information.\n\n\
"You are evaluating whether Sol should respond to a message in a group chat. \
Sol is a librarian with access to the team's message archive.\n\n\
Recent conversation:\n{context}\n\n\
Latest message: {body}\n\n\
Respond ONLY with JSON: {{\"relevance\": 0.0-1.0, \"hook\": \"brief reason or empty string\"}}\n\
relevance=1.0 means Sol absolutely should respond, 0.0 means irrelevant."
{participation_note}\n\n\
Respond ONLY with JSON: {{\"relevance\": 0.0-1.0, \"hook\": \"brief reason or empty string\", \"emoji\": \"a single emoji reaction or empty string\"}}\n\
relevance=1.0 means Sol absolutely should respond, 0.0 means irrelevant.\n\
emoji: if Sol wouldn't write a full response but might react to the message, suggest a single emoji. \
pick something that feels natural and specific to the message — not generic thumbs up. leave empty if no reaction fits."
);
let messages = vec![ChatMessage::new_user_message(&prompt)];
@@ -159,21 +211,48 @@ impl Evaluator {
match result {
Ok(response) => {
let text = &response.choices[0].message.content;
info!(
raw_response = text.as_str(),
model = self.config.mistral.evaluation_model.as_str(),
"LLM evaluation raw response"
);
match serde_json::from_str::<serde_json::Value>(text) {
Ok(val) => {
let relevance = val["relevance"].as_f64().unwrap_or(0.0) as f32;
let hook = val["hook"].as_str().unwrap_or("").to_string();
let emoji = val["emoji"].as_str().unwrap_or("").to_string();
let threshold = self.config.behavior.spontaneous_threshold;
let reaction_threshold = self.config.behavior.reaction_threshold;
let reaction_enabled = self.config.behavior.reaction_enabled;
debug!(relevance, hook = hook.as_str(), "Evaluation result");
info!(
relevance,
threshold,
reaction_threshold,
hook = hook.as_str(),
emoji = emoji.as_str(),
"LLM evaluation parsed"
);
if relevance >= self.config.behavior.spontaneous_threshold {
if relevance >= threshold {
Engagement::MaybeRespond { relevance, hook }
} else if reaction_enabled
&& relevance >= reaction_threshold
&& !emoji.is_empty()
{
info!(
relevance,
emoji = emoji.as_str(),
"Reaction range — will react with emoji"
);
Engagement::React { emoji, relevance }
} else {
Engagement::Ignore
}
}
Err(e) => {
warn!("Failed to parse evaluation response: {e}");
warn!(raw = text.as_str(), "Failed to parse evaluation response: {e}");
Engagement::Ignore
}
}