release: Storybook v0.2.0 - Major syntax and features update

BREAKING CHANGES:
- Relationship syntax now requires blocks for all participants
- Removed self/other perspective blocks from relationships
- Replaced 'guard' keyword with 'if' for behavior tree decorators

Language Features:
- Add tree-sitter grammar with improved if/condition disambiguation
- Add comprehensive tutorial and reference documentation
- Add SBIR v0.2.0 binary format specification
- Add resource linking system for behaviors and schedules
- Add year-long schedule patterns (day, season, recurrence)
- Add behavior tree enhancements (named nodes, decorators)

Documentation:
- Complete tutorial series (9 chapters) with baker family examples
- Complete reference documentation for all language features
- SBIR v0.2.0 specification with binary format details
- Added locations and institutions documentation

Examples:
- Convert all examples to baker family scenario
- Add comprehensive working examples

Tooling:
- Zed extension with LSP integration
- Tree-sitter grammar for syntax highlighting
- Build scripts and development tools

Version Updates:
- Main package: 0.1.0 → 0.2.0
- Tree-sitter grammar: 0.1.0 → 0.2.0
- Zed extension: 0.1.0 → 0.2.0
- Storybook editor: 0.1.0 → 0.2.0
This commit is contained in:
2026-02-13 21:52:03 +00:00
parent 80332971b8
commit 16deb5d237
290 changed files with 90316 additions and 5827 deletions

28
src/bin/storybook-lsp.rs Normal file
View File

@@ -0,0 +1,28 @@
//! Storybook Language Server
//!
//! LSP server providing language support for Storybook DSL including:
//! - Real-time diagnostics
//! - Hover information
//! - Document symbols (outline)
//! - Go-to-definition
//! - Find references
//! - Autocomplete
//! - Document formatting
use tower_lsp::{
    LspService,
    Server,
};

/// Entry point: wires the Storybook language server onto the process's
/// standard streams and serves LSP requests until the client disconnects.
#[tokio::main]
async fn main() {
    // Initialize logging first so startup problems surface via RUST_LOG.
    env_logger::init();
    let (service, socket) =
        LspService::new(storybook::lsp::StorybookLanguageServer::new);
    // LSP transport is stdin/stdout.
    let (input, output) = (tokio::io::stdin(), tokio::io::stdout());
    Server::new(input, output, socket).serve(service).await;
}

View File

@@ -137,6 +137,8 @@ fn test_duplicate_definition_error() {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}),
Declaration::Character(Character {
@@ -144,6 +146,8 @@ fn test_duplicate_definition_error() {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
}),
],
@@ -314,9 +318,16 @@ fn test_trait_out_of_range_negative() {
fn test_schedule_overlap_error() {
let schedule = Schedule {
name: "DailyRoutine".to_string(),
extends: None,
fields: Vec::new(),
recurrences: Vec::new(),
blocks: vec![
ScheduleBlock {
name: None,
is_override: false,
activity: "work".to_string(),
action: None,
temporal_constraint: None,
start: Time {
hour: 8,
minute: 0,
@@ -331,7 +342,11 @@ fn test_schedule_overlap_error() {
span: Span::new(0, 50),
},
ScheduleBlock {
name: None,
is_override: false,
activity: "lunch".to_string(),
action: None,
temporal_constraint: None,
start: Time {
hour: 12,
minute: 0, // Overlaps with work!
@@ -433,6 +448,8 @@ fn test_duplicate_field_in_convert() {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};

View File

@@ -33,6 +33,8 @@
// Suppress false positive warnings from thiserror macro
#![allow(unused_assignments)]
pub mod lsp;
pub mod position;
pub mod query;
pub mod resolve;
pub mod syntax;

159
src/lsp/behavior_tests.rs Normal file
View File

@@ -0,0 +1,159 @@
//! Tests for behavior tree support in LSP
#[cfg(test)]
mod tests {
    use crate::lsp::{
        document::Document,
        symbols,
    };

    // Shared fixture exercising the main behavior-tree shapes: a bare
    // action, `choose` (selector) containing a `then` (sequence) branch,
    // a top-level sequence, and nested choose/then combinations.
    const BEHAVIOR_SAMPLE: &str = r#"
behavior SimpleBehavior {
walk_around
}
behavior FindFood {
choose {
check_hungry
then {
find_nearest_food
move_to_food
eat
}
}
}
behavior SocialBehavior {
then {
greet
small_talk
farewell
}
}
behavior ComplexDecisionTree {
choose {
then {
check_threat
flee_to_safety
}
then {
check_resources
gather_resources
}
idle
}
}
"#;

    /// The fixture must lex/parse cleanly into an AST with no errors.
    #[test]
    fn test_behavior_parsing() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        // Dump any parse errors before asserting so failures are diagnosable.
        if !doc.parse_errors.is_empty() {
            for err in &doc.parse_errors {
                eprintln!("Parse error: {}", err.message);
            }
        }
        assert!(doc.ast.is_some(), "Should parse behavior trees");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    /// Every behavior declaration in the fixture registers in the name table.
    #[test]
    fn test_behavior_symbols() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        // Should extract behavior declarations
        assert!(doc.name_table.resolve_name("SimpleBehavior").is_some());
        assert!(doc.name_table.resolve_name("FindFood").is_some());
        assert!(doc.name_table.resolve_name("SocialBehavior").is_some());
        assert!(doc.name_table.resolve_name("ComplexDecisionTree").is_some());
    }

    /// Name-table entries for behaviors carry `DeclKind::Behavior`.
    #[test]
    fn test_behavior_symbol_kinds() {
        use crate::resolve::names::DeclKind;
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        let find_food = doc.name_table.resolve_name("FindFood").unwrap();
        assert_eq!(find_food.kind, DeclKind::Behavior);
    }

    /// Behaviors surface in the document outline as MODULE symbols.
    #[test]
    fn test_behavior_in_document_symbols() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        // Should have behavior symbols
        let behaviors: Vec<_> = symbols
            .iter()
            .filter(|s| s.kind == tower_lsp::lsp_types::SymbolKind::MODULE)
            .collect();
        assert_eq!(behaviors.len(), 4, "Should have 4 behaviors");
        assert!(behaviors.iter().any(|b| b.name == "SimpleBehavior"));
        assert!(behaviors.iter().any(|b| b.name == "FindFood"));
        assert!(behaviors.iter().any(|b| b.name == "SocialBehavior"));
        assert!(behaviors.iter().any(|b| b.name == "ComplexDecisionTree"));
    }

    /// `include` subtree references parse inside a sequence node.
    #[test]
    fn test_behavior_with_subtrees() {
        let sample = r#"
behavior WithSubtrees {
then {
include helpers::check_preconditions
main_action
include helpers::cleanup
}
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse subtrees");
        assert!(doc.name_table.resolve_name("WithSubtrees").is_some());
    }

    /// Minimal behavior: a single bare action and nothing else.
    #[test]
    fn test_behavior_simple_action() {
        let sample = r#"
behavior SimpleAction {
walk_around
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse simple action");
        assert!(doc.name_table.resolve_name("SimpleAction").is_some());
    }

    /// Standalone `choose` and `then` blocks parse; both behaviors register.
    #[test]
    fn test_behavior_selectors_and_sequences() {
        let sample = r#"
behavior SelectorExample {
choose {
option_one
option_two
default_option
}
}
behavior SequenceExample {
then {
step_one
step_two
step_three
}
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse selectors and sequences");
        assert_eq!(doc.name_table.all_entries().count(), 2);
    }
}

2578
src/lsp/code_actions.rs Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

775
src/lsp/completion.rs Normal file
View File

@@ -0,0 +1,775 @@
//! Autocomplete/completion provider
//!
//! Provides context-aware completion suggestions for:
//! - Keywords (filtered by context)
//! - Entity names (characters, templates, etc.)
//! - Field names (from templates/species when in character block)
//! - Type names (templates/species when after ':')
//! - Enum values
//! - Action names (in behavior trees)
use tower_lsp::lsp_types::{
CompletionItem,
CompletionItemKind,
CompletionList,
CompletionParams,
CompletionResponse,
Documentation,
MarkupContent,
MarkupKind,
};
use super::document::Document;
use crate::syntax::ast::Value;
/// Syntactic region the cursor is in, used to filter completion suggestions.
///
/// Determined heuristically from the tokens before the cursor; `Unknown`
/// falls back to offering every completion group.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CompletionContext {
    /// Top-level of the document
    TopLevel,
    /// Inside a character/template field block
    InFieldBlock,
    /// After a colon (expecting a type or value)
    AfterColon,
    /// Inside a behavior tree
    InBehavior,
    /// Inside a life arc
    InLifeArc,
    /// Inside a relationship
    InRelationship,
    /// Unknown context
    Unknown,
}
/// Get completion items at a position
///
/// Resolution order:
/// 1. Field-accessor completions (`ident.` …) — returned as an exclusive
///    `CompletionList` so keywords don't pollute member access.
/// 2. Nothing at all while the user is typing a new declaration name.
/// 3. Context-dependent keyword/entity suggestions, sorted so field
///    accessors rank above everything else.
pub fn get_completions(doc: &Document, params: &CompletionParams) -> Option<CompletionResponse> {
    let position = params.text_document_position.position;
    // Check for field accessor using the specialized function
    // It will return Some only if there's an identifier followed by a dot
    if let Some(field_items) = get_field_accessor_completions(doc, position) {
        return Some(CompletionResponse::List(CompletionList {
            is_incomplete: false,
            items: field_items,
        }));
    }
    // Convert position to byte offset for context-based completions
    let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
    // Check if we're typing a new identifier name after a declaration keyword
    if is_typing_declaration_name(&doc.text, offset) {
        // Don't show completions when typing a new identifier
        return None;
    }
    // Determine context by analyzing text around cursor
    let context = determine_context(&doc.text, offset);
    let mut items = Vec::new();
    match context {
        | CompletionContext::TopLevel => {
            // At top level, suggest declaration keywords
            items.extend(top_level_keyword_completions());
            // Also suggest existing entity names for reference
            items.extend(entity_completions(doc));
        },
        | CompletionContext::InFieldBlock => {
            // Inside a field block, suggest fields from the species/templates
            if let Some(species_fields) = get_contextual_field_completions(doc, offset) {
                items.extend(species_fields);
            } else {
                // Fallback to generic field keywords if we can't determine context
                items.extend(field_keyword_completions());
            }
        },
        | CompletionContext::AfterColon => {
            // After colon, suggest types (templates, species)
            items.extend(type_completions(doc));
        },
        | CompletionContext::InBehavior => {
            // In behavior tree, suggest behavior-specific keywords
            items.extend(behavior_keyword_completions());
            items.extend(behavior_completions(doc)); // Reference to other
            // behaviors
        },
        | CompletionContext::InLifeArc => {
            // In life arc, suggest state-related keywords
            items.extend(life_arc_keyword_completions());
        },
        | CompletionContext::InRelationship => {
            // In relationship, suggest relationship-specific keywords
            items.extend(relationship_keyword_completions());
            items.extend(character_completions(doc)); // For participants
        },
        | CompletionContext::Unknown => {
            // When context is unclear, provide all completions
            items.extend(all_keyword_completions());
            items.extend(entity_completions(doc));
        },
    }
    // Set sort_text for proper ordering: field accessors first (0xxx), then others
    // (1xxx)
    for item in &mut items {
        let detail = item.detail.as_deref().unwrap_or("");
        // Heuristic: items whose detail mentions field/trait are accessors.
        let is_field = detail.contains("field") || detail.contains("trait");
        // Field accessors get "0" prefix, others get "1" prefix
        let prefix = if is_field { "0" } else { "1" };
        item.sort_text = Some(format!("{}{}", prefix, item.label));
    }
    // Sort by sort_text for consistent ordering
    items.sort_by(|a, b| {
        let sort_a = a.sort_text.as_deref().unwrap_or(&a.label);
        let sort_b = b.sort_text.as_deref().unwrap_or(&b.label);
        sort_a.cmp(sort_b)
    });
    Some(CompletionResponse::Array(items))
}
/// Convert LSP position to byte offset
///
/// NOTE(review): `character` is added as a raw byte count; LSP positions
/// default to UTF-16 code units, so this is only exact for ASCII lines —
/// confirm against the negotiated position encoding.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    doc.positions
        .line_offset(line)
        .map(|line_start| line_start + character)
}
/// Check if we're typing a new identifier name after a declaration keyword
///
/// True when the token immediately before the cursor is a declaration
/// keyword (`character `, `template `, …) or when the cursor sits on the
/// identifier directly following one — i.e. the user is naming a brand-new
/// entity, where completion suggestions would only be noise.
fn is_typing_declaration_name(text: &str, offset: usize) -> bool {
    use crate::syntax::lexer::{
        Lexer,
        Token,
    };
    /// Keywords that introduce a new named declaration.
    fn is_decl_keyword(word: &str) -> bool {
        matches!(
            word,
            "character" |
                "template" |
                "species" |
                "behavior" |
                "life_arc" |
                "relationship" |
                "institution" |
                "location" |
                "enum" |
                "schedule"
        )
    }
    // Look at up to ~200 bytes before the cursor, snapping both ends to
    // char boundaries so slicing can't panic inside a multi-byte UTF-8
    // sequence (the previous fixed `offset - 200` slice could).
    let mut end = offset.min(text.len());
    while !text.is_char_boundary(end) {
        end -= 1;
    }
    let mut start = end.saturating_sub(200);
    while !text.is_char_boundary(start) {
        start -= 1;
    }
    // Tokenize using lexer
    let tokens: Vec<_> = Lexer::new(&text[start..end]).collect();
    if tokens.is_empty() {
        return false;
    }
    let last_idx = tokens.len() - 1;
    // Cursor right after the keyword itself: `character |`
    if let (_offset, Token::Ident(keyword), _end) = &tokens[last_idx] {
        if is_decl_keyword(keyword) {
            return true;
        }
    }
    // Cursor in the middle of typing the new name: `character Ali|`
    if tokens.len() >= 2 {
        if let (_offset, Token::Ident(keyword), _end) = &tokens[tokens.len() - 2] {
            if is_decl_keyword(keyword) {
                // Only when the trailing token is an identifier (not `:`/`{`).
                if let (_offset, Token::Ident(_), _end) = &tokens[last_idx] {
                    return true;
                }
            }
        }
    }
    false
}
/// Format a value as its type string for documentation
///
/// Identifiers render as their dotted path, lists as `[ElemType]` (element
/// type taken from the first entry), ranges as `start..end`; every other
/// variant maps to a fixed type name.
fn format_value_type(value: &Value) -> String {
    match value {
        Value::Identifier(segments) => segments.join("."),
        Value::String(_) => String::from("String"),
        Value::Int(_) => String::from("Int"),
        Value::Float(_) => String::from("Float"),
        Value::Bool(_) => String::from("Bool"),
        Value::List(items) => items.first().map_or_else(
            || String::from("List"),
            |head| format!("[{}]", format_value_type(head)),
        ),
        Value::Object(_) => String::from("Object"),
        Value::Range(lo, hi) => {
            format!("{}..{}", format_value_type(lo), format_value_type(hi))
        },
        Value::Time(_) => String::from("Time"),
        Value::Duration(_) => String::from("Duration"),
        Value::ProseBlock(_) => String::from("ProseBlock"),
        Value::Override(_) => String::from("Override"),
    }
}
/// Get field completions based on the current character/template context
///
/// Walks the AST for the character or template declaration whose span
/// contains `offset`, then offers template directives plus (for characters)
/// the fields declared on the character's species. Returns `None` when the
/// cursor is not inside any character/template block.
fn get_contextual_field_completions(doc: &Document, offset: usize) -> Option<Vec<CompletionItem>> {
    use crate::{
        resolve::names::DeclKind,
        syntax::ast::Declaration,
    };
    let ast = doc.ast.as_ref()?;
    // Find which declaration contains the cursor offset
    for decl in &ast.declarations {
        match decl {
            | Declaration::Character(character) => {
                // Check if cursor is inside this character block
                if offset >= character.span.start && offset <= character.span.end {
                    let mut items = Vec::new();
                    // Add special keywords
                    items.push(simple_item(
                        "from",
                        "Apply a template",
                        "from ${1:TemplateName}",
                    ));
                    items.push(simple_item(
                        "include",
                        "Include a template",
                        "include ${1:TemplateName}",
                    ));
                    // Add fields from species
                    if let Some(ref species_name) = character.species {
                        if let Some(species_entry) = doc.name_table.resolve_name(species_name) {
                            if species_entry.kind == DeclKind::Species {
                                // Resolve the species declaration by name to
                                // enumerate its fields as snippet completions.
                                for species_decl in &ast.declarations {
                                    if let Declaration::Species(species) = species_decl {
                                        if &species.name == species_name {
                                            for field in &species.fields {
                                                items.push(CompletionItem {
                                                    label: format!("{}:", field.name),
                                                    kind: Some(CompletionItemKind::FIELD),
                                                    detail: Some(format!("({})", species_name)),
                                                    insert_text: Some(format!("{}: $0", field.name)),
                                                    insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
                                                    ..Default::default()
                                                });
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    return Some(items);
                }
            },
            | Declaration::Template(template) => {
                // Check if cursor is inside this template block
                if offset >= template.span.start && offset <= template.span.end {
                    let mut items = Vec::new();
                    // Add special keywords for templates
                    items.push(simple_item(
                        "include",
                        "Include a template",
                        "include ${1:TemplateName}",
                    ));
                    // Templates can suggest common field patterns
                    return Some(items);
                }
            },
            | _ => {},
        }
    }
    None
}
/// Get field completions when triggered by `.` using lexer
///
/// Lexes the current line up to the cursor; if it contains a dot, resolves
/// the identifier before the dot and returns that entity's fields (own,
/// species-inherited, template-provided). Returns `Some(vec![])` — not
/// `None` — whenever a dot is present but nothing resolves, so keyword
/// completions stay suppressed in member-access position. Returns `None`
/// only when the line has no dot at all.
fn get_field_accessor_completions(
    doc: &Document,
    position: tower_lsp::lsp_types::Position,
) -> Option<Vec<CompletionItem>> {
    use crate::{
        resolve::names::DeclKind,
        syntax::{
            ast::Declaration,
            lexer::{
                Lexer,
                Token,
            },
        },
    };
    // Lex the line up to the cursor to find the identifier before the dot
    let line_offset = doc.positions.line_offset(position.line as usize)?;
    let line_end = (line_offset + position.character as usize).min(doc.text.len());
    let line_text = &doc.text[line_offset..line_end];
    // Lex tokens on this line
    let lexer = Lexer::new(line_text);
    let tokens: Vec<_> = lexer.collect();
    // Check if there's a dot token - if not, this isn't a field accessor
    let has_dot = tokens
        .iter()
        .any(|(_, token, _)| matches!(token, Token::Dot));
    if !has_dot {
        return None;
    }
    // Find the last identifier before the last dot
    // NOTE(review): this loop actually stops at the FIRST dot that follows an
    // identifier, so for a chain like `a.b.` the target is `a` — confirm
    // whether chained accessors should resolve the last segment instead.
    let mut last_ident = None;
    for (_start, token, _end) in &tokens {
        match token {
            | Token::Ident(name) => last_ident = Some(name.clone()),
            | Token::Dot => {
                // We found a dot - if we have an identifier, that's our target
                if last_ident.is_some() {
                    break;
                }
            },
            | _ => {},
        }
    }
    // If there's a dot but no identifier, return empty list to block keywords
    let identifier = match last_ident {
        | Some(id) => id,
        | None => return Some(Vec::new()),
    };
    // Look up the identifier - if it fails, still return empty to block keywords
    let entry = match doc.name_table.resolve_name(&identifier) {
        | Some(e) => e,
        | None => return Some(Vec::new()),
    };
    let ast = match doc.ast.as_ref() {
        | Some(a) => a,
        | None => return Some(Vec::new()),
    };
    let mut items = Vec::new();
    match entry.kind {
        | DeclKind::Character => {
            // Locate the character's AST node to enumerate its fields.
            for decl in &ast.declarations {
                if let Declaration::Character(character) = decl {
                    if character.name == identifier {
                        // Add character's own fields
                        for field in &character.fields {
                            let value_type = format_value_type(&field.value);
                            items.push(CompletionItem {
                                label: field.name.clone(),
                                kind: Some(CompletionItemKind::FIELD),
                                detail: None, // Keep inline display clean
                                documentation: Some(Documentation::MarkupContent(MarkupContent {
                                    kind: MarkupKind::Markdown,
                                    value: format!(
                                        "**Field** of `{}`\n\nType: `{}`",
                                        identifier, value_type
                                    ),
                                })),
                                ..Default::default()
                            });
                        }
                        // Add species fields
                        if let Some(ref species_name) = character.species {
                            if let Some(species_entry) = doc.name_table.resolve_name(species_name) {
                                if species_entry.kind == DeclKind::Species {
                                    for decl in &ast.declarations {
                                        if let Declaration::Species(species) = decl {
                                            if &species.name == species_name {
                                                for field in &species.fields {
                                                    let value_type =
                                                        format_value_type(&field.value);
                                                    items.push(CompletionItem {
                                                        label: field.name.clone(),
                                                        kind: Some(CompletionItemKind::FIELD),
                                                        detail: Some(format!("({})", species_name)),
                                                        documentation: Some(Documentation::MarkupContent(MarkupContent {
                                                            kind: MarkupKind::Markdown,
                                                            value: format!("**Trait** from `{}`\n\nType: `{}`", species_name, value_type),
                                                        })),
                                                        ..Default::default()
                                                    });
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        // Add template fields
                        if let Some(ref template_names) = character.template {
                            for template_name in template_names {
                                if let Some(template_entry) =
                                    doc.name_table.resolve_name(template_name)
                                {
                                    if template_entry.kind == DeclKind::Template {
                                        for decl in &ast.declarations {
                                            if let Declaration::Template(template) = decl {
                                                if &template.name == template_name {
                                                    for field in &template.fields {
                                                        let value_type =
                                                            format_value_type(&field.value);
                                                        items.push(CompletionItem {
                                                            label: field.name.clone(),
                                                            kind: Some(CompletionItemKind::FIELD),
                                                            detail: Some(format!("({})", template_name)),
                                                            documentation: Some(Documentation::MarkupContent(MarkupContent {
                                                                kind: MarkupKind::Markdown,
                                                                value: format!("**Template field** from `{}`\n\nType: `{}`", template_name, value_type),
                                                            })),
                                                            ..Default::default()
                                                        });
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        break;
                    }
                }
            }
        },
        // For non-character declarations, still return empty list to block keywords
        | _ => return Some(Vec::new()),
    }
    // Always return Some to block keyword completions, even if no fields found
    items.sort_by(|a, b| a.label.cmp(&b.label));
    Some(items)
}
/// Determine completion context by analyzing tokens around cursor using lexer
///
/// Scans up to ~500 bytes before the cursor, tracking brace nesting depth,
/// the most recent declaration keyword, and whether a `:` is still "open"
/// (no `{` or declaration keyword has followed it yet).
fn determine_context(text: &str, offset: usize) -> CompletionContext {
    use crate::syntax::lexer::{
        Lexer,
        Token,
    };
    // Get text before cursor (up to 500 bytes for context), snapped to char
    // boundaries so the slice can't panic inside a multi-byte UTF-8 sequence.
    let mut end = offset.min(text.len());
    while !text.is_char_boundary(end) {
        end -= 1;
    }
    let mut start = end.saturating_sub(500);
    while !text.is_char_boundary(start) {
        start -= 1;
    }
    let before = &text[start..end];
    // Tokenize using lexer
    let lexer = Lexer::new(before);
    let tokens: Vec<_> = lexer.collect();
    // Track state by analyzing tokens
    let mut nesting_level: i32 = 0;
    let mut last_keyword = None;
    let mut seen_colon_without_brace = false;
    for (_offset, token, _end) in &tokens {
        match token {
            | Token::LBrace => {
                nesting_level += 1;
                // Entering a block consumes any pending `name: Type {` colon.
                // (Previously this reset lived in a second `Token::LBrace if …`
                // arm that was unreachable — shadowed by the unguarded arm —
                // so `character X: Y { |` was misreported as AfterColon.)
                seen_colon_without_brace = false;
            },
            | Token::RBrace => nesting_level = nesting_level.saturating_sub(1),
            | Token::Colon => {
                // Mark that we've seen a colon (a type/value position follows)
                seen_colon_without_brace = true;
            },
            | Token::Ident(keyword)
                if matches!(
                    keyword.as_str(),
                    "character" |
                    "template" |
                    "species" |
                    "behavior" |
                    "life_arc" |
                    "relationship" |
                    "institution" |
                    "location" |
                    "enum" |
                    "schedule"
                ) =>
            {
                last_keyword = Some(keyword.clone());
                seen_colon_without_brace = false;
            },
            | _ => {},
        }
    }
    // If we saw a colon without a brace after it, we're in type position
    if seen_colon_without_brace {
        return CompletionContext::AfterColon;
    }
    // At top level if no nesting
    if nesting_level == 0 {
        return CompletionContext::TopLevel;
    }
    // Determine context based on last keyword and nesting (nesting_level > 0
    // is guaranteed past the early return; guards kept for clarity)
    match last_keyword.as_deref() {
        | Some("behavior") if nesting_level > 0 => CompletionContext::InBehavior,
        | Some("life_arc") if nesting_level > 0 => CompletionContext::InLifeArc,
        | Some("relationship") if nesting_level > 0 => CompletionContext::InRelationship,
        | Some("character" | "template" | "species" | "institution" | "location")
            if nesting_level > 0 =>
        {
            CompletionContext::InFieldBlock
        },
        | _ => CompletionContext::Unknown,
    }
}
/// Get entity completions (all symbols)
///
/// Maps every named declaration in the document's name table to a
/// completion item whose LSP kind mirrors the declaration kind.
fn entity_completions(doc: &Document) -> Vec<CompletionItem> {
    use crate::resolve::names::DeclKind;
    let mut items = Vec::new();
    for entry in doc.name_table.all_entries() {
        let kind = match entry.kind {
            | DeclKind::Character => CompletionItemKind::CLASS,
            | DeclKind::Template => CompletionItemKind::INTERFACE,
            | DeclKind::LifeArc => CompletionItemKind::FUNCTION,
            | DeclKind::Schedule => CompletionItemKind::EVENT,
            | DeclKind::Behavior => CompletionItemKind::MODULE,
            | DeclKind::Institution => CompletionItemKind::MODULE,
            | DeclKind::Relationship => CompletionItemKind::STRUCT,
            | DeclKind::Location => CompletionItemKind::CONSTANT,
            | DeclKind::Species => CompletionItemKind::CLASS,
            | DeclKind::Enum => CompletionItemKind::ENUM,
        };
        // The final path segment is the display name. `cloned().unwrap_or_default()`
        // avoids the previous `unwrap_or(&String::new())`, which allocated an
        // empty String on every iteration even when a segment was present.
        let name = entry.qualified_path.last().cloned().unwrap_or_default();
        items.push(CompletionItem {
            label: name,
            kind: Some(kind),
            detail: Some(format!("{:?}", entry.kind)),
            ..Default::default()
        });
    }
    items
}
/// Get type completions (templates and species)
///
/// Only `Template` and `Species` entries qualify as type annotations
/// (the position after `:` in a declaration header).
fn type_completions(doc: &Document) -> Vec<CompletionItem> {
    use crate::resolve::names::DeclKind;
    let mut items = Vec::new();
    for entry in doc.name_table.all_entries() {
        match entry.kind {
            | DeclKind::Template | DeclKind::Species => {
                // Final path segment is the display name; `unwrap_or_default`
                // avoids allocating an empty String on every iteration the way
                // `unwrap_or(&String::new())` did.
                let name = entry.qualified_path.last().cloned().unwrap_or_default();
                items.push(CompletionItem {
                    label: name,
                    kind: Some(CompletionItemKind::INTERFACE),
                    detail: Some(format!("{:?}", entry.kind)),
                    documentation: Some(Documentation::String("Type annotation".to_string())),
                    ..Default::default()
                });
            },
            | _ => {},
        }
    }
    items
}
/// Get behavior completions
///
/// Behaviors are referenced with an `@` prefix, so both the label and the
/// inserted text carry it.
fn behavior_completions(doc: &Document) -> Vec<CompletionItem> {
    use crate::resolve::names::DeclKind;
    let mut items = Vec::new();
    for entry in doc.name_table.entries_of_kind(DeclKind::Behavior) {
        // Final path segment is the display name; avoids the always-allocating
        // `unwrap_or(&String::new())` fallback.
        let name = entry.qualified_path.last().cloned().unwrap_or_default();
        items.push(CompletionItem {
            label: format!("@{}", name),
            kind: Some(CompletionItemKind::REFERENCE),
            detail: Some("Behavior tree reference".to_string()),
            insert_text: Some(format!("@{}", name)),
            ..Default::default()
        });
    }
    items
}
/// Get character completions
///
/// Offered inside relationship blocks, where participants are characters.
fn character_completions(doc: &Document) -> Vec<CompletionItem> {
    use crate::resolve::names::DeclKind;
    let mut items = Vec::new();
    for entry in doc.name_table.entries_of_kind(DeclKind::Character) {
        // Final path segment is the display name; avoids the always-allocating
        // `unwrap_or(&String::new())` fallback.
        let name = entry.qualified_path.last().cloned().unwrap_or_default();
        items.push(CompletionItem {
            label: name,
            kind: Some(CompletionItemKind::CLASS),
            detail: Some("Character".to_string()),
            ..Default::default()
        });
    }
    items
}
/// Get all keyword completions (fallback)
///
/// Union of every context-specific keyword group, used when the cursor
/// context cannot be determined.
fn all_keyword_completions() -> Vec<CompletionItem> {
    [
        top_level_keyword_completions(),
        field_keyword_completions(),
        behavior_keyword_completions(),
        life_arc_keyword_completions(),
        relationship_keyword_completions(),
    ]
    .into_iter()
    .flatten()
    .collect()
}
/// Get top-level declaration keywords
///
/// Each entry is `(label, description, snippet)`; snippets use LSP
/// tab-stop placeholders.
fn top_level_keyword_completions() -> Vec<CompletionItem> {
    const KEYWORDS: &[(&str, &str, &str)] = &[
        ("character", "Define a character entity", "character ${1:Name}: ${2:Species} {\n $0\n}"),
        ("template", "Define a reusable field template", "template ${1:Name} {\n $0\n}"),
        ("life_arc", "Define a state machine", "life_arc ${1:Name} {\n state ${2:initial} {\n $0\n }\n}"),
        ("schedule", "Define a daily schedule", "schedule ${1:Name} {\n ${2:08:00} -> ${3:09:00}: ${4:block_name} {\n $0\n }\n}"),
        ("behavior", "Define a behavior tree", "behavior ${1:Name} {\n $0\n}"),
        ("institution", "Define an organization", "institution ${1:Name} {\n $0\n}"),
        ("relationship", "Define a relationship", "relationship ${1:Name} {\n $0\n}"),
        ("location", "Define a location", "location ${1:Name} {\n $0\n}"),
        ("species", "Define a species", "species ${1:Name} {\n $0\n}"),
        ("enum", "Define an enumeration", "enum ${1:Name} {\n ${2:Value1}\n ${3:Value2}\n}"),
        ("use", "Import declarations", "use ${1:path::to::item};"),
    ];
    KEYWORDS
        .iter()
        .map(|&(label, detail, snippet)| keyword_item(label, detail, snippet))
        .collect()
}
/// Get field-level keywords
///
/// Template directives first, then a handful of commonly used field names.
fn field_keyword_completions() -> Vec<CompletionItem> {
    let mut items = vec![
        keyword_item("from", "Apply a template", "from ${1:TemplateName}"),
        keyword_item("include", "Include a template", "include ${1:TemplateName}"),
        keyword_item("strict", "Enforce strict template fields", "strict"),
    ];
    // Common field names
    let common_fields = [
        ("age", "Age field", "age: ${1:0}"),
        ("name", "Name field", "name: \"${1:Name}\""),
        ("bond", "Bond trait (0.0-1.0)", "bond: ${1:0.5}"),
        ("trust", "Trust trait (0.0-1.0)", "trust: ${1:0.5}"),
        ("love", "Love trait (0.0-1.0)", "love: ${1:0.5}"),
    ];
    items.extend(
        common_fields
            .iter()
            .map(|&(label, detail, snippet)| simple_item(label, detail, snippet)),
    );
    items
}
/// Get behavior tree keywords
///
/// `?`/`>`/`*` are the node operators; `@` references another behavior.
fn behavior_keyword_completions() -> Vec<CompletionItem> {
    let selector = keyword_item("?", "Selector node (try options in order)", "? {\n $0\n}");
    let sequence = keyword_item(">", "Sequence node (execute in order)", "> {\n $0\n}");
    let repeat = keyword_item("*", "Repeat node (loop forever)", "* {\n $0\n}");
    let subtree = simple_item("@", "Subtree reference", "@${1:behavior::name}");
    vec![selector, sequence, repeat, subtree]
}
/// Get life arc keywords
///
/// `state` declares a state; `on` declares a transition between states.
fn life_arc_keyword_completions() -> Vec<CompletionItem> {
    [
        ("state", "Define a life arc state", "state ${1:name} {\n $0\n}"),
        ("on", "Define a transition", "on ${1:condition} -> ${2:target_state}"),
    ]
    .iter()
    .map(|&(label, detail, snippet)| keyword_item(label, detail, snippet))
    .collect()
}
/// Get relationship keywords
///
/// `as` binds a participant role; `self`/`other` reference the two sides.
fn relationship_keyword_completions() -> Vec<CompletionItem> {
    let role = keyword_item(
        "as",
        "Define participant role",
        "${1:CharacterName} as ${2:role} {\n $0\n}",
    );
    let self_ref = keyword_item("self", "Reference self in relationships", "self.${1:field}");
    let other_ref = keyword_item("other", "Reference other participant", "other.${1:field}");
    vec![role, self_ref, other_ref]
}
/// Build a keyword-kind completion: markdown docs plus a snippet body.
fn keyword_item(label: &str, detail: &str, snippet: &str) -> CompletionItem {
    let docs = MarkupContent {
        kind: MarkupKind::Markdown,
        value: format!("**{}**\n\n{}", label, detail),
    };
    CompletionItem {
        kind: Some(CompletionItemKind::KEYWORD),
        label: label.to_owned(),
        detail: Some(detail.to_owned()),
        documentation: Some(Documentation::MarkupContent(docs)),
        insert_text: Some(snippet.to_owned()),
        insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
        ..Default::default()
    }
}
/// Build a property-kind completion with a snippet body (no markdown docs).
fn simple_item(label: &str, detail: &str, snippet: &str) -> CompletionItem {
    CompletionItem {
        kind: Some(CompletionItemKind::PROPERTY),
        label: label.to_owned(),
        detail: Some(detail.to_owned()),
        insert_text: Some(snippet.to_owned()),
        insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
        ..Default::default()
    }
}

282
src/lsp/completion_tests.rs Normal file
View File

@@ -0,0 +1,282 @@
//! Tests for context-aware completion
#[cfg(test)]
mod tests {
use tower_lsp::lsp_types::{
CompletionParams,
Position,
TextDocumentIdentifier,
TextDocumentPositionParams,
Url,
};
use crate::lsp::{
completion,
document::Document,
};
fn make_params(line: u32, character: u32) -> CompletionParams {
CompletionParams {
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: Url::parse("file:///test.sb").unwrap(),
},
position: Position { line, character },
},
work_done_progress_params: Default::default(),
partial_result_params: Default::default(),
context: None,
}
}
#[test]
fn test_top_level_completions() {
let doc = Document::new("".to_string());
let params = make_params(0, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have top-level keywords
assert!(items.iter().any(|item| item.label == "character"));
assert!(items.iter().any(|item| item.label == "template"));
assert!(items.iter().any(|item| item.label == "behavior"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_field_block_completions() {
let source = "character Alice {\n \n}";
let doc = Document::new(source.to_string());
// Position inside the character block (line 1, after spaces)
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have field-related keywords
assert!(items.iter().any(|item| item.label == "from"));
assert!(items.iter().any(|item| item.label == "age"));
assert!(items.iter().any(|item| item.label == "bond"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_completions_include_templates() {
// Test that templates show up in completions
let source = "template Child { age: number }\n";
let doc = Document::new(source.to_string());
let params = make_params(1, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should include Child template in completions
assert!(
items.iter().any(|item| item.label == "Child"),
"Should have Child template in completions"
);
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_behavior_completions() {
let source = "behavior Test {\n \n}";
let doc = Document::new(source.to_string());
// Position inside behavior block
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have behavior tree keywords
assert!(items.iter().any(|item| item.label == "?"));
assert!(items.iter().any(|item| item.label == ">"));
assert!(items.iter().any(|item| item.label == "*"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_life_arc_completions() {
let source = "life_arc Growing {\n \n}";
let doc = Document::new(source.to_string());
// Position inside life arc block
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have life arc keywords
assert!(items.iter().any(|item| item.label == "state"));
assert!(items.iter().any(|item| item.label == "on"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_relationship_completions() {
    // Completions requested with the cursor inside an empty relationship body.
    let source = "relationship Friends {\n \n}";
    let doc = Document::new(source.to_string());
    // Position inside relationship block
    let params = make_params(1, 4);
    let result = completion::get_completions(&doc, &params);
    assert!(result.is_some());
    if let Some(response) = result {
        match response {
            | tower_lsp::lsp_types::CompletionResponse::Array(items) => {
                // Should have relationship keywords
                // NOTE(review): v0.2.0 removed self/other perspective blocks
                // from relationships — confirm "self"/"other" are still valid
                // completion labels here, or update these expectations.
                assert!(items.iter().any(|item| item.label == "as"));
                assert!(items.iter().any(|item| item.label == "self"));
                assert!(items.iter().any(|item| item.label == "other"));
            },
            | _ => panic!("Expected array response"),
        }
    }
}
#[test]
fn test_type_suggestions_in_completions() {
    // A document declaring a template, a species, and two characters.
    let source = r#"template Child { age: number }
species Human {}
character Alice: Child {}
character Bob {}"#;
    let doc = Document::new(source.to_string());
    let result = completion::get_completions(&doc, &make_params(0, 0));
    assert!(result.is_some());
    match result {
        Some(tower_lsp::lsp_types::CompletionResponse::Array(items)) => {
            // Both the template and the species should be suggested by name.
            for name in ["Child", "Human"] {
                assert!(items.iter().any(|item| item.label == name));
            }
        },
        Some(_) => panic!("Expected array response"),
        None => unreachable!(),
    }
}
#[test]
fn test_behavior_reference_in_symbols() {
    // Behaviors defined in the file should surface in the completion list.
    let doc = Document::new(
        "behavior WalkAround { patrol }\nbehavior Main { idle }".to_string(),
    );
    let result = completion::get_completions(&doc, &make_params(0, 0));
    assert!(result.is_some());
    match result {
        Some(tower_lsp::lsp_types::CompletionResponse::Array(items)) => {
            let offered = |needle: &str| items.iter().any(|item| item.label.contains(needle));
            assert!(offered("WalkAround"), "Should have WalkAround in completions");
            assert!(offered("Main"), "Should have Main in completions");
        },
        Some(_) => panic!("Expected array response"),
        None => unreachable!(),
    }
}
#[test]
fn test_snippet_format_in_completions() {
    // Keyword completions are delivered as snippets with tab-stop placeholders.
    let doc = Document::new(String::new());
    let result = completion::get_completions(&doc, &make_params(0, 0));
    assert!(result.is_some());
    match result {
        Some(tower_lsp::lsp_types::CompletionResponse::Array(items)) => {
            let char_item = items.iter().find(|item| item.label == "character");
            assert!(char_item.is_some());
            if let Some(item) = char_item {
                // Snippet-style insert text with an explicit format flag.
                assert!(item.insert_text.is_some());
                assert!(item.insert_text_format.is_some());
                // Must carry at least one `${...}` placeholder.
                assert!(item.insert_text.as_ref().unwrap().contains("${"));
            }
        },
        Some(_) => panic!("Expected array response"),
        None => unreachable!(),
    }
}
#[test]
fn test_no_duplicate_completions() {
    let source = "character Alice {}\ncharacter Alice {}"; // Duplicate name
    let doc = Document::new(source.to_string());
    let params = make_params(0, 0);
    // Duplicate definitions cause NameTable::from_file() to fail,
    // resulting in an empty name table and no completions.
    // This is correct - duplicates should be caught as validation errors.
    assert!(
        !doc.resolve_errors.is_empty(),
        "Should have validation error for duplicate"
    );
    let result = completion::get_completions(&doc, &params);
    assert!(result.is_some());
    if let Some(response) = result {
        match response {
            | tower_lsp::lsp_types::CompletionResponse::Array(items) => {
                // Count how many times "Alice" appears
                // (zero: the failed name table contributes no user symbols).
                let alice_count = items.iter().filter(|item| item.label == "Alice").count();
                assert_eq!(
                    alice_count, 0,
                    "Should have no completions when there are duplicate definitions"
                );
            },
            | _ => panic!("Expected array response"),
        }
    }
}
}

55
src/lsp/definition.rs Normal file
View File

@@ -0,0 +1,55 @@
//! Go-to-definition provider
//!
//! Allows jumping to the definition of symbols
use tower_lsp::lsp_types::{
GotoDefinitionParams,
GotoDefinitionResponse,
Location,
Range,
Url,
};
use super::document::Document;
/// Get the definition location for a symbol at a position
///
/// Returns `None` when the cursor is not on a word or the word does not
/// resolve to a known declaration.
pub fn get_definition(
    doc: &Document,
    params: &GotoDefinitionParams,
    uri: &Url,
) -> Option<GotoDefinitionResponse> {
    use tower_lsp::lsp_types::Position;

    // Locate the identifier under the cursor.
    let pos = params.text_document_position_params.position;
    let offset = position_to_offset(doc, pos.line as usize, pos.character as usize)?;
    let word = doc.word_at_offset(offset)?;

    // Resolve it against the document's name table.
    let entry = doc.name_table.resolve_name(&word)?;

    // offset_to_position is invoked on a clone — presumably it takes
    // `&mut self` (internal caching?); confirm against PositionTracker.
    let mut positions = doc.positions.clone();
    let (start_line, start_col) = positions.offset_to_position(entry.span.start);
    let (end_line, end_col) = positions.offset_to_position(entry.span.end);

    let range = Range {
        start: Position {
            line: start_line as u32,
            character: start_col as u32,
        },
        end: Position {
            line: end_line as u32,
            character: end_col as u32,
        },
    };
    Some(GotoDefinitionResponse::Scalar(Location {
        uri: uri.clone(),
        range,
    }))
}
/// Convert an LSP position to a byte offset into the document text.
///
/// LSP columns count UTF-16 code units, while this crate works in byte
/// offsets, so the target line is walked character by character to convert
/// between the two. The result is clamped to the end of the line, so an
/// out-of-range column can no longer produce an offset past the line or one
/// that lands inside a multi-byte character.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    let line_start = doc.positions.line_offset(line)?;
    let rest = &doc.text[line_start..];
    let mut utf16_units = 0;
    for (byte_idx, ch) in rest.char_indices() {
        // Stop at the requested column or at the end of this line.
        if utf16_units >= character || ch == '\n' {
            return Some(line_start + byte_idx);
        }
        utf16_units += ch.len_utf16();
    }
    // Column ran past the final line: clamp to end of text.
    Some(line_start + rest.len())
}

163
src/lsp/diagnostics.rs Normal file
View File

@@ -0,0 +1,163 @@
//! Diagnostics conversion from Storybook errors to LSP diagnostics
use tower_lsp::lsp_types::{
Diagnostic,
DiagnosticSeverity,
Position,
Range,
};
use crate::syntax::lexer::{
Lexer,
Token,
};
/// Compute diagnostics for a document
///
/// Currently this only runs the lightweight lexer-based checks in
/// `try_parse`; full-parser diagnostics are produced elsewhere.
pub fn compute_diagnostics(text: &str) -> Vec<Diagnostic> {
    match try_parse(text) {
        Ok(()) => Vec::new(),
        Err(errors) => errors,
    }
}
/// Attempt to parse the document and return diagnostics
///
/// TODO: Integrate with the actual parser once Span tracking is available.
/// Until then this is a placeholder that only checks brace balance using the
/// lexer's token stream — so braces inside strings and comments are never
/// miscounted.
fn try_parse(text: &str) -> Result<(), Vec<Diagnostic>> {
    let mut errors = Vec::new();
    // Current `{` nesting depth; a `}` seen at depth zero has no opener.
    let mut depth: usize = 0;

    for (offset, token, _end) in Lexer::new(text) {
        match token {
            Token::LBrace => depth += 1,
            Token::RBrace if depth > 0 => depth -= 1,
            Token::RBrace => {
                // Unexpected closing brace - no matching opening brace
                let start = byte_offset_to_position(text, offset);
                let end = Position {
                    line: start.line,
                    character: start.character + 1,
                };
                errors.push(Diagnostic {
                    range: Range { start, end },
                    severity: Some(DiagnosticSeverity::ERROR),
                    code: None,
                    source: Some("storybook".to_string()),
                    message: "Unexpected closing brace".to_string(),
                    related_information: None,
                    tags: None,
                    code_description: None,
                    data: None,
                });
            },
            _ => {},
        }
    }

    // Unclosed opening braces are deliberately not reported: they are common
    // in incomplete/in-progress code.
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Number of newline characters strictly before `offset` — i.e. the
/// zero-based line index containing that byte offset.
///
/// `'\n'` is a single byte in UTF-8 (continuation bytes are always >= 0x80),
/// so a plain byte scan is exact even for multi-byte text. Offsets past the
/// end of the text simply count every line, as before.
///
/// NOTE(review): this helper appears unused within this module — confirm
/// whether external callers exist before removing it.
fn byte_offset_to_line(text: &str, offset: usize) -> usize {
    text.as_bytes()
        .iter()
        .take(offset)
        .filter(|&&b| b == b'\n')
        .count()
}
/// Convert a byte offset to line/column position
/// This is a placeholder - will be replaced when we have proper Span tracking
///
/// NOTE(review): `character` is counted in chars here, whereas LSP columns
/// are UTF-16 code units; these differ for non-BMP characters — confirm this
/// is acceptable for the placeholder.
pub fn byte_offset_to_position(text: &str, offset: usize) -> Position {
    let mut line: u32 = 0;
    let mut character: u32 = 0;
    for (idx, ch) in text.char_indices() {
        // Stop once we reach (or pass) the requested byte offset.
        if idx >= offset {
            break;
        }
        if ch == '\n' {
            line += 1;
            character = 0;
        } else {
            character += 1;
        }
    }
    Position { line, character }
}
/// Create a diagnostic from a span and message
///
/// The byte span is translated to LSP positions via
/// `byte_offset_to_position`; all optional LSP fields stay unset and the
/// source is always "storybook".
pub fn create_diagnostic(
    text: &str,
    start: usize,
    end: usize,
    message: String,
    severity: DiagnosticSeverity,
) -> Diagnostic {
    let range = Range {
        start: byte_offset_to_position(text, start),
        end: byte_offset_to_position(text, end),
    };
    Diagnostic {
        range,
        severity: Some(severity),
        code: None,
        code_description: None,
        source: Some("storybook".to_string()),
        message,
        related_information: None,
        tags: None,
        data: None,
    }
}

View File

@@ -0,0 +1,271 @@
//! Tests for diagnostics functionality
#[cfg(test)]
mod tests {
    use tower_lsp::lsp_types::DiagnosticSeverity;

    use crate::lsp::{
        diagnostics,
        document::Document,
    };

    // --- Parse-error diagnostics surfaced through Document ---

    #[test]
    fn test_parse_error_diagnostics() {
        let invalid = "character { missing name }";
        let doc = Document::new(invalid.to_string());
        assert!(!doc.parse_errors.is_empty(), "Should have parse errors");
        assert!(doc.ast.is_none(), "Should not have AST");
    }

    #[test]
    fn test_multiple_parse_errors() {
        let invalid = r#"
character {
}
template {
}
invalid syntax here
"#;
        let doc = Document::new(invalid.to_string());
        // Should report errors (may be combined into one or multiple)
        assert!(!doc.parse_errors.is_empty());
    }

    #[test]
    fn test_no_errors_for_valid_code() {
        let valid = "character Alice { age: 7 }";
        let doc = Document::new(valid.to_string());
        assert!(
            doc.parse_errors.is_empty(),
            "Valid code should have no errors"
        );
        assert!(doc.ast.is_some(), "Should have parsed AST");
    }

    #[test]
    fn test_error_positions() {
        let invalid = "character Alice { age: }"; // Missing value
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            // Error should have valid position (byte offsets within input)
            assert!(error.start < invalid.len());
            assert!(error.end <= invalid.len());
            assert!(error.start <= error.end);
        }
    }

    #[test]
    fn test_error_messages_are_descriptive() {
        let invalid = "character Alice { age: }";
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            assert!(
                !error.message.is_empty(),
                "Error message should not be empty"
            );
            assert!(
                error.message.len() > 10,
                "Error message should be descriptive"
            );
        }
    }

    #[test]
    fn test_error_severity() {
        use crate::lsp::document::ErrorSeverity;
        let invalid = "character { }";
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            // Parse errors should be Error severity
            assert!(matches!(error.severity, ErrorSeverity::Error));
        }
    }

    #[test]
    fn test_unclosed_brace_error() {
        let invalid = "character Alice { age: 7";
        let doc = Document::new(invalid.to_string());
        assert!(!doc.parse_errors.is_empty(), "Should detect unclosed brace");
    }

    #[test]
    fn test_unexpected_token_error() {
        let invalid = "character Alice } age: 7 {"; // Backwards braces
        let doc = Document::new(invalid.to_string());
        assert!(
            !doc.parse_errors.is_empty(),
            "Should detect unexpected tokens"
        );
    }

    #[test]
    fn test_invalid_field_syntax() {
        let invalid = "character Alice { age = 7 }"; // Wrong operator
        let doc = Document::new(invalid.to_string());
        // May or may not parse depending on parser flexibility
        // Just verify we handle it gracefully
        assert!(doc.ast.is_some() || !doc.parse_errors.is_empty());
    }

    #[test]
    fn test_empty_input() {
        let doc = Document::new("".to_string());
        // Empty input is valid - just no declarations
        assert!(doc.parse_errors.is_empty());
    }

    #[test]
    fn test_whitespace_only() {
        let doc = Document::new(" \n\n\t\t ".to_string());
        // Whitespace only is valid
        assert!(doc.parse_errors.is_empty());
    }

    #[test]
    fn test_comments_only() {
        let doc = Document::new("// Just comments\n// Nothing else".to_string());
        // Comments only is valid
        assert!(doc.parse_errors.is_empty());
    }

    // Tests for diagnostics utility functions

    #[test]
    fn test_byte_offset_to_position_start() {
        let text = "hello world";
        let pos = diagnostics::byte_offset_to_position(text, 0);
        assert_eq!(pos.line, 0);
        assert_eq!(pos.character, 0);
    }

    #[test]
    fn test_byte_offset_to_position_middle() {
        let text = "hello world";
        let pos = diagnostics::byte_offset_to_position(text, 6);
        assert_eq!(pos.line, 0);
        assert_eq!(pos.character, 6);
    }

    #[test]
    fn test_byte_offset_to_position_multiline() {
        let text = "line 1\nline 2\nline 3";
        let pos = diagnostics::byte_offset_to_position(text, 7); // Start of line 2
        assert_eq!(pos.line, 1);
        assert_eq!(pos.character, 0);
    }

    #[test]
    fn test_byte_offset_to_position_beyond_text() {
        let text = "short";
        let pos = diagnostics::byte_offset_to_position(text, 1000);
        // Should not panic, returns position at end (line is always valid u32)
        assert!(pos.line == 0 || pos.line > 0);
    }

    #[test]
    fn test_create_diagnostic() {
        let text = "character Alice {}";
        let diag = diagnostics::create_diagnostic(
            text,
            0,
            9,
            "Test message".to_string(),
            DiagnosticSeverity::ERROR,
        );
        assert_eq!(diag.message, "Test message");
        assert_eq!(diag.severity, Some(DiagnosticSeverity::ERROR));
        assert_eq!(diag.source, Some("storybook".to_string()));
    }

    #[test]
    fn test_create_diagnostic_with_warning() {
        let text = "test";
        let diag = diagnostics::create_diagnostic(
            text,
            0,
            4,
            "Warning message".to_string(),
            DiagnosticSeverity::WARNING,
        );
        assert_eq!(diag.severity, Some(DiagnosticSeverity::WARNING));
    }

    #[test]
    fn test_compute_diagnostics_valid() {
        let text = "character Alice { age: 7 }";
        let diags = diagnostics::compute_diagnostics(text);
        // Valid code should produce no diagnostics from the placeholder implementation
        // (The real diagnostics come from document.rs)
        // Just verify the function runs without panicking - result may vary
        let _ = diags;
    }

    #[test]
    fn test_compute_diagnostics_unclosed_brace() {
        let text = "} extra closing brace";
        let diags = diagnostics::compute_diagnostics(text);
        // Should detect the unexpected closing brace
        assert!(!diags.is_empty());
        assert!(diags[0].message.contains("closing brace"));
    }

    #[test]
    fn test_braces_in_strings_are_ignored() {
        // This was the bug: braces inside string literals were being counted
        let text = r#"character Alice {
description: "A person with { and } in their bio"
age: 7
}"#;
        let diags = diagnostics::compute_diagnostics(text);
        // Should NOT report any errors - braces in strings should be ignored
        assert!(
            diags.is_empty(),
            "Braces in strings should not trigger errors"
        );
    }

    #[test]
    fn test_actual_unmatched_brace_detected() {
        // But actual unmatched braces should still be detected
        let text = r#"character Alice {
age: 7
}
}"#; // Extra closing brace
        let diags = diagnostics::compute_diagnostics(text);
        assert!(
            !diags.is_empty(),
            "Actual unmatched braces should be detected"
        );
        assert!(diags[0].message.contains("closing brace"));
    }

    #[test]
    fn test_braces_in_comments_are_ignored() {
        // Comments should also be ignored
        let text = r#"character Alice {
age: 7 // This has { and } in comment
}"#;
        let diags = diagnostics::compute_diagnostics(text);
        assert!(
            diags.is_empty(),
            "Braces in comments should not trigger errors"
        );
    }
}

419
src/lsp/document.rs Normal file
View File

@@ -0,0 +1,419 @@
//! Document state management for LSP
//!
//! This module manages parsed document state including AST, position tracking,
//! and symbol tables for efficient LSP operations.
use crate::{
position::PositionTracker,
resolve::{
names::NameTable,
validate,
ErrorCollector,
ResolveError,
},
syntax::{
ast::File,
lexer::Lexer,
FileParser,
},
};
/// Parsed document with all necessary state for LSP operations
///
/// Every field is derived from `text`; the whole state is rebuilt by
/// `Document::new` and `Document::update`.
pub struct Document {
    /// The source text
    pub text: String,
    /// Position tracker for offset -> line/col conversion
    pub positions: PositionTracker,
    /// Parsed AST (if parsing succeeded)
    pub ast: Option<File>,
    /// Parse errors
    pub parse_errors: Vec<ParseError>,
    /// Semantic validation errors from resolver
    pub resolve_errors: Vec<ResolveError>,
    /// Name table from the language's semantic resolution
    pub name_table: NameTable,
}
/// A parse error with a byte-offset span into the source text.
#[derive(Debug, Clone)]
pub struct ParseError {
    /// Byte offset where the error begins.
    pub start: usize,
    /// Byte offset just past the end of the error span.
    pub end: usize,
    /// Human-readable description of the problem.
    pub message: String,
    /// How severe the error is.
    pub severity: ErrorSeverity,
}
/// Severity level attached to a `ParseError`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorSeverity {
    /// A hard error.
    Error,
    /// A non-fatal warning.
    Warning,
}
impl Document {
/// Create a new document from source text
pub fn new(text: String) -> Self {
let positions = PositionTracker::new(&text);
// Parse the document
let (ast, parse_errors) = Self::parse(&text);
// Build name table and run semantic validation
let (name_table, resolve_errors) = if let Some(ref ast) = ast {
// Use language's NameTable for semantic resolution
let mut resolve_errors = Vec::new();
let name_table = match NameTable::from_file(ast) {
| Ok(table) => table,
| Err(e) => {
// Capture NameTable errors (e.g., duplicate definitions)
resolve_errors.push(e);
NameTable::new()
},
};
// Also run validation
resolve_errors.extend(Self::validate(ast));
(name_table, resolve_errors)
} else {
(NameTable::new(), Vec::new())
};
Self {
text,
positions,
ast,
parse_errors,
resolve_errors,
name_table,
}
}
/// Update the document with new text
pub fn update(&mut self, text: String) {
self.text = text;
self.positions = PositionTracker::new(&self.text);
let (ast, parse_errors) = Self::parse(&self.text);
self.ast = ast;
self.parse_errors = parse_errors;
let (name_table, resolve_errors) = if let Some(ref ast) = self.ast {
// Use language's NameTable for semantic resolution
let mut resolve_errors = Vec::new();
let name_table = match NameTable::from_file(ast) {
| Ok(table) => table,
| Err(e) => {
// Capture NameTable errors (e.g., duplicate definitions)
resolve_errors.push(e);
NameTable::new()
},
};
// Also run validation
resolve_errors.extend(Self::validate(ast));
(name_table, resolve_errors)
} else {
(NameTable::new(), Vec::new())
};
self.name_table = name_table;
self.resolve_errors = resolve_errors;
}
/// Run semantic validation on the AST
fn validate(ast: &File) -> Vec<ResolveError> {
let mut collector = ErrorCollector::default();
// Validate each declaration type
for decl in &ast.declarations {
use crate::syntax::ast::Declaration;
match decl {
| Declaration::Character(c) => {
validate::validate_no_reserved_keywords(&c.fields, &mut collector);
validate::validate_trait_ranges(&c.fields, &mut collector);
},
| Declaration::Template(t) => {
validate::validate_no_reserved_keywords(&t.fields, &mut collector);
},
| Declaration::LifeArc(l) => {
validate::validate_life_arc_transitions(l, &mut collector);
},
| Declaration::Schedule(s) => {
validate::validate_schedule_overlaps(s, &mut collector);
},
| Declaration::Behavior(_b) => {
// Behavior validation requires action registry, skip for
// now TODO: Add action registry support
},
| Declaration::Relationship(_r) => {
// Relationship bond validation happens at a different level
// (requires multiple relationships), skip for now
},
| _ => {},
}
}
// Extract errors from collector
// Since ErrorCollector doesn't expose errors directly, we use into_result and
// extract
match collector.into_result(()) {
| Ok(_) => Vec::new(),
| Err(ResolveError::MultipleErrors { errors, .. }) => errors,
| Err(e) => vec![e],
}
}
/// Lex text up to a position and return tokens with their spans
/// Returns Vec<(start, token, end)>
fn lex_until(text: &str, until: usize) -> Vec<(usize, crate::syntax::lexer::Token, usize)> {
let lexer = Lexer::new(&text[..until.min(text.len())]);
lexer.collect()
}
/// Parse the source text
fn parse(text: &str) -> (Option<File>, Vec<ParseError>) {
let lexer = Lexer::new(text);
let parser = FileParser::new();
match parser.parse(lexer) {
| Ok(file) => (Some(file), Vec::new()),
| Err(e) => {
use lalrpop_util::ParseError as LalrpopError;
let (start, end, message) = match e {
| LalrpopError::InvalidToken { location } => {
(location, location + 1, "Invalid token".to_string())
},
| LalrpopError::UnrecognizedEof { location, expected } => {
let expected_str = if expected.is_empty() {
String::new()
} else if expected.len() == 1 {
format!(" (expected {})", expected[0])
} else {
format!(" (expected one of: {})", expected.join(", "))
};
(
location,
location,
format!("Unexpected end of file{}", expected_str),
)
},
| LalrpopError::UnrecognizedToken {
token: (start, tok, end),
expected,
} => {
use crate::syntax::lexer::Token;
// Lex tokens up to the error position
let tokens = Self::lex_until(text, start);
// Check what we expected vs what we got
let expecting_close_brace = expected.iter().any(|e| e.contains("}"));
let expecting_comma = expected.iter().any(|e| e.contains(","));
let expecting_colon = expected.iter().any(|e| e.contains(":"));
let is_declaration_keyword = matches!(
tok,
Token::Character |
Token::Template |
Token::LifeArc |
Token::Schedule |
Token::Behavior |
Token::Institution |
Token::Relationship |
Token::Location |
Token::Species |
Token::Enum
);
let is_identifier = matches!(tok, Token::Ident(_));
// Case 1: Missing closing brace before new declaration
if expecting_close_brace && is_declaration_keyword {
// Find the last non-comment identifier token and the declaration
// keyword
let mut last_ident_name = None;
let mut last_ident_span = None;
let mut decl_keyword = None;
for (tok_start, token, tok_end) in tokens.iter().rev() {
match token {
| Token::Ident(name) if last_ident_name.is_none() => {
last_ident_name = Some(name.clone());
last_ident_span = Some((*tok_start, *tok_end));
},
| Token::Character |
Token::Template |
Token::LifeArc |
Token::Schedule |
Token::Behavior |
Token::Institution |
Token::Relationship |
Token::Location |
Token::Species |
Token::Enum => {
decl_keyword = Some(token);
break;
},
| _ => {},
}
}
if let (Some((ident_start, ident_end)), Some(keyword)) =
(last_ident_span, decl_keyword)
{
let decl_type = match keyword {
| Token::Character => "character",
| Token::Template => "template",
| Token::LifeArc => "life_arc",
| Token::Schedule => "schedule",
| Token::Behavior => "behavior",
| Token::Institution => "institution",
| Token::Relationship => "relationship",
| Token::Location => "location",
| Token::Species => "species",
| Token::Enum => "enum",
| _ => "declaration",
};
let decl_name =
last_ident_name.unwrap_or_else(|| "unknown".to_string());
let message = format!(
"Missing closing brace '}}' for {} {}",
decl_type, decl_name
);
(ident_start, ident_end, message)
} else {
(start, end, "Missing closing brace '}'".to_string())
}
}
// Case 2: Missing comma or colon before identifier
else if (expecting_comma || expecting_colon) && is_identifier {
// Find the last identifier token before the error
if let Some((tok_start, Token::Ident(name), tok_end)) = tokens
.iter()
.rev()
.find(|(_, t, _)| matches!(t, Token::Ident(_)))
{
let message = if expecting_comma {
format!("Missing comma after '{}'", name)
} else {
format!("Missing ':' after '{}'", name)
};
(*tok_start, *tok_end, message)
} else {
let message = if expecting_comma {
"Missing comma".to_string()
} else {
"Missing ':'".to_string()
};
(start, end, message)
}
}
// Case 3: Other errors
else {
let message = if expected.len() == 1 {
let expected_token = &expected[0];
if expected_token.contains(";") {
"Missing semicolon".to_string()
} else if expected_token.contains("}") {
"Missing closing brace '}'".to_string()
} else {
format!("Expected {}, found {:?}", expected_token, tok)
}
} else if !expected.is_empty() {
format!("Expected one of: {}, found {:?}", expected.join(", "), tok)
} else {
format!("Unexpected token {:?}", tok)
};
(start, end, message)
}
},
| LalrpopError::ExtraToken {
token: (start, tok, end),
} => (start, end, format!("Extra token {:?}", tok)),
| LalrpopError::User { error } => {
// Our custom ParseError already has span information
// Extract it if it's UnexpectedToken or other variants
use crate::syntax::ParseError as CustomParseError;
match error {
| CustomParseError::UnexpectedToken { token, span } => (
span.offset(),
span.offset() + span.len(),
format!("Unexpected token: {}", token),
),
| CustomParseError::UnexpectedEof { span } => (
span.offset(),
span.offset() + span.len(),
"Unexpected end of file".to_string(),
),
| CustomParseError::InvalidToken { span } => (
span.offset(),
span.offset() + span.len(),
"Invalid token".to_string(),
),
| CustomParseError::UnclosedProseBlock { tag, span } => (
span.offset(),
span.offset() + span.len(),
format!("Unclosed prose block: ---{}", tag),
),
}
},
};
let error = ParseError {
start,
end,
message,
severity: ErrorSeverity::Error,
};
(None, vec![error])
},
}
}
/// Get the word at a byte offset
pub fn word_at_offset(&self, offset: usize) -> Option<String> {
if offset > self.text.len() {
return None;
}
let chars: Vec<char> = self.text.chars().collect();
if chars.is_empty() {
return None;
}
let mut byte_offset = 0;
let mut char_index = 0;
// Find the character index for this byte offset
for (i, ch) in chars.iter().enumerate() {
if byte_offset == offset {
char_index = i;
break;
}
byte_offset += ch.len_utf8();
if i == chars.len() - 1 {
// Last character
char_index = i;
}
}
// Check if we're on a word character
if char_index >= chars.len() || !is_word_char(chars[char_index]) {
return None;
}
// Find word boundaries
let mut start = char_index;
while start > 0 && is_word_char(chars[start - 1]) {
start -= 1;
}
let mut end = char_index + 1;
while end < chars.len() && is_word_char(chars[end]) {
end += 1;
}
Some(chars[start..end].iter().collect())
}
}
/// True for characters that may appear in an identifier: alphanumerics and
/// the underscore.
fn is_word_char(c: char) -> bool {
    c == '_' || c.is_alphanumeric()
}

View File

@@ -0,0 +1,191 @@
//! Edge case tests for document functionality
#[cfg(test)]
mod tests {
    use crate::lsp::document::Document;

    // --- word_at_offset edge cases ---

    #[test]
    fn test_word_at_offset_unicode() {
        let doc = Document::new("character Café { age: 7 }".to_string());
        // Test finding "Café"
        let word = doc.word_at_offset(10);
        assert_eq!(word, Some("Café".to_string()));
    }

    #[test]
    fn test_word_at_offset_underscore() {
        // Underscores are word characters, so the whole identifier comes back.
        let doc = Document::new("character snake_case { }".to_string());
        let word = doc.word_at_offset(12);
        assert_eq!(word, Some("snake_case".to_string()));
    }

    #[test]
    fn test_word_at_offset_at_start() {
        let doc = Document::new("character Alice { }".to_string());
        let word = doc.word_at_offset(0);
        assert_eq!(word, Some("character".to_string()));
    }

    #[test]
    fn test_word_at_offset_at_end() {
        let doc = Document::new("character Alice".to_string());
        let word = doc.word_at_offset(14);
        assert_eq!(word, Some("Alice".to_string()));
    }

    #[test]
    fn test_word_at_offset_out_of_bounds() {
        let doc = Document::new("test".to_string());
        let word = doc.word_at_offset(1000);
        assert_eq!(word, None);
    }

    #[test]
    fn test_word_at_offset_on_whitespace() {
        let doc = Document::new("character Alice".to_string());
        let word = doc.word_at_offset(9); // Space between character and Alice
        assert_eq!(word, None);
    }

    #[test]
    fn test_word_at_offset_on_punctuation() {
        let doc = Document::new("character Alice { }".to_string());
        let word = doc.word_at_offset(16); // On '{'
        assert_eq!(word, None);
    }

    // --- update / symbol-table behavior ---

    #[test]
    fn test_update_clears_old_symbols() {
        // The name table must be rebuilt, not merged, on update.
        let mut doc = Document::new("character Alice {}".to_string());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        doc.update("character Bob {}".to_string());
        assert!(doc.name_table.resolve_name("Alice").is_none());
        assert!(doc.name_table.resolve_name("Bob").is_some());
    }

    #[test]
    fn test_update_with_invalid_syntax() {
        let mut doc = Document::new("character Alice {}".to_string());
        assert!(doc.ast.is_some());
        assert!(doc.parse_errors.is_empty());
        doc.update("invalid { }".to_string());
        assert!(doc.ast.is_none());
        assert!(!doc.parse_errors.is_empty());
    }

    #[test]
    fn test_empty_document_has_no_symbols() {
        let doc = Document::new("".to_string());
        assert_eq!(doc.name_table.all_entries().count(), 0);
    }

    #[test]
    fn test_symbol_table_with_duplicates() {
        let source = r#"
character Alice { age: 7 }
character Alice { age: 8 }
"#;
        let doc = Document::new(source.to_string());
        // Duplicate declarations should be caught during resolution
        // NameTable from_file will fail, so we'll have an empty table and
        // resolve_errors
        assert!(
            !doc.resolve_errors.is_empty(),
            "Should have validation error for duplicate"
        );
    }

    #[test]
    fn test_mixed_declaration_types() {
        let source = r#"
species Human {}
character Alice: Human {}
template Child {}
enum Mood { Happy, Sad }
location Home {}
relationship Friends { Alice as friend {} Bob as friend {} }
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Human").is_some());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        assert!(doc.name_table.resolve_name("Child").is_some());
        assert!(doc.name_table.resolve_name("Mood").is_some());
        assert!(doc.name_table.resolve_name("Home").is_some());
        assert!(doc.name_table.resolve_name("Friends").is_some());
    }

    #[test]
    fn test_life_arc_symbol_extraction() {
        let source = r#"
life_arc Growing {
state child {}
state teen {}
state adult {}
}
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Growing").is_some());
        let growing = doc.name_table.resolve_name("Growing").unwrap();
        assert_eq!(growing.kind, crate::resolve::names::DeclKind::LifeArc);
    }

    #[test]
    fn test_schedule_symbol_extraction() {
        let source = r#"
schedule Daily {
08:00 -> 09:00: breakfast {}
09:00 -> 12:00: work {}
}
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Daily").is_some());
        let daily = doc.name_table.resolve_name("Daily").unwrap();
        assert_eq!(daily.kind, crate::resolve::names::DeclKind::Schedule);
    }

    #[test]
    fn test_institution_symbol_extraction() {
        let source = "institution School { type: education }";
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("School").is_some());
        let school = doc.name_table.resolve_name("School").unwrap();
        assert_eq!(school.kind, crate::resolve::names::DeclKind::Institution);
    }

    #[test]
    fn test_very_long_identifier() {
        // A 1000-character identifier should resolve without truncation.
        let long_name = "a".repeat(1000);
        let source = format!("character {} {{}}", long_name);
        let doc = Document::new(source);
        assert!(doc.name_table.resolve_name(&long_name).is_some());
    }

    #[test]
    fn test_multiline_document() {
        let source = "\n\n\n\ncharacter Alice {\n\n\n age: 7\n\n\n}";
        let doc = Document::new(source.to_string());
        assert!(doc.ast.is_some());
        assert!(doc.name_table.resolve_name("Alice").is_some());
    }
}

146
src/lsp/formatting.rs Normal file
View File

@@ -0,0 +1,146 @@
//! Document formatting provider
//!
//! Provides auto-formatting for Storybook files
use tower_lsp::lsp_types::{
FormattingOptions,
Position,
Range,
TextEdit,
};
use super::document::Document;
/// Format the entire document
///
/// Returns `None` when there is nothing to change, or when the document has
/// parse errors (formatting an unparsable document could produce garbage).
pub fn format_document(doc: &Document, _options: &FormattingOptions) -> Option<Vec<TextEdit>> {
    // Refuse to format while no valid AST is available.
    if doc.ast.is_none() {
        return None;
    }

    // Basic rules: indentation by brace depth, colon spacing, and collapsed
    // blank lines between declarations.
    let formatted = format_text(&doc.text);
    if formatted == doc.text {
        return None; // Already formatted
    }

    // Replace the entire document with one edit.
    let whole_document = Range {
        start: Position {
            line: 0,
            character: 0,
        },
        end: Position {
            line: doc.positions.line_count() as u32,
            character: 0,
        },
    };
    Some(vec![TextEdit {
        range: whole_document,
        new_text: formatted,
    }])
}
/// Format the text according to Storybook style rules
///
/// - collapses runs of blank lines into one
/// - indents by brace depth (NOTE(review): the indent unit below is the
///   literal `" "` — confirm it matches the documented 4-space style)
/// - normalizes `key: value` spacing via `format_line`
/// - leaves `---`-fenced prose content untouched
pub(crate) fn format_text(text: &str) -> String {
    let mut out = String::new();
    let mut depth: usize = 0;
    let mut last_was_blank = false;
    let mut in_prose = false;

    for raw in text.lines() {
        let line = raw.trim();

        // Prose fences toggle prose mode; the fence itself gets indented.
        if line.starts_with("---") {
            in_prose = !in_prose;
            out.push_str(&" ".repeat(depth));
            out.push_str(line);
            out.push('\n');
            continue;
        }
        // Prose content is emitted exactly as written.
        if in_prose {
            out.push_str(raw);
            out.push('\n');
            continue;
        }

        // Collapse consecutive blank lines into a single one.
        if line.is_empty() {
            if !last_was_blank {
                out.push('\n');
                last_was_blank = true;
            }
            continue;
        }
        last_was_blank = false;

        // A leading `}` closes a block, so it sits one level shallower.
        if line.starts_with('}') {
            depth = depth.saturating_sub(1);
        }
        out.push_str(&" ".repeat(depth));
        out.push_str(&format_line(line));
        out.push('\n');
        // A trailing `{` opens a block for the lines that follow.
        if line.ends_with('{') {
            depth += 1;
        }
    }
    out
}

/// Format a single (already trimmed) line
///
/// Normalizes spacing around the first `:` in field assignments; comment
/// lines and lines with `::` module paths are left untouched.
fn format_line(line: &str) -> String {
    if line.starts_with("//") || line.contains("::") {
        return line.to_string();
    }
    match line.split_once(':') {
        Some((before, after)) => format!("{}: {}", before.trim_end(), after.trim_start()),
        None => line.to_string(),
    }
}
// Smoke tests colocated with the formatter; the broader suite lives in
// `formatting_tests.rs`.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_basic_formatting() {
        // Unformatted one-liner: colon spacing should be normalized.
        let input = "character Alice{age:25}";
        let formatted = format_text(input);
        // Check key formatting features
        assert!(
            formatted.contains("age: 25"),
            "Should have space after colon"
        );
        assert!(
            formatted.contains("character Alice"),
            "Should have character declaration"
        );
    }
    #[test]
    fn test_preserve_prose() {
        // Content between `---` delimiters must survive byte-for-byte.
        let input = "---backstory\nSome irregular spacing\n---";
        let formatted = format_text(input);
        assert!(formatted.contains("Some irregular spacing"));
    }
}

167
src/lsp/formatting_tests.rs Normal file
View File

@@ -0,0 +1,167 @@
//! Tests for LSP document formatting functionality
//!
//! This module tests auto-formatting for:
//! - Spacing normalization around colons
//! - Indentation preservation
//! - Prose block preservation
//! - Edge cases (empty documents, already formatted)
use tower_lsp::lsp_types::FormattingOptions;
use super::{
document::Document,
formatting::{
format_document,
format_text,
},
};
// --- Colon spacing normalization ---
#[test]
fn test_format_adds_space_after_colon() {
    let input = "character Alice{age:7}";
    let formatted = format_text(input);
    // Main formatting: adds space after colon
    assert!(formatted.contains("age: 7"));
}
#[test]
fn test_format_normalizes_colon_spacing() {
    let input = "character Alice{age : 7}";
    let formatted = format_text(input);
    // Should normalize to single space after colon
    assert!(formatted.contains("age: 7"));
    assert!(!formatted.contains("age :"));
}
#[test]
fn test_format_multiple_fields() {
    let input = "character Alice{age:7\nname:\"Alice\"}";
    let formatted = format_text(input);
    assert!(formatted.contains("age: 7"));
    assert!(formatted.contains("name: \"Alice\""));
}
#[test]
fn test_format_preserves_module_paths() {
    let input = "use characters::Alice;";
    let formatted = format_text(input);
    // Should NOT add space in :: paths
    assert!(formatted.contains("characters::Alice"));
    assert!(!formatted.contains("characters: :Alice"));
}
// --- Prose blocks are passed through verbatim ---
#[test]
fn test_format_preserves_prose_blocks() {
    let input = r#"character Alice{
---backstory
Some irregular spacing here
---
age:7}"#;
    let formatted = format_text(input);
    // Prose content preserved exactly
    assert!(formatted.contains("Some irregular spacing here"));
    // But other fields still formatted
    assert!(formatted.contains("age: 7"));
}
// --- Brace-driven indentation ---
#[test]
fn test_format_indentation_with_braces() {
    let input = "character Alice{\nage:7\n}";
    let formatted = format_text(input);
    // Check indentation (4 spaces)
    assert!(formatted.contains("character Alice{"));
    assert!(formatted.contains("    age: 7"));
    assert!(formatted.contains("}"));
}
#[test]
fn test_format_nested_indentation() {
    let input = "template Person{\nnested:{\nvalue:123\n}\n}";
    let formatted = format_text(input);
    assert!(formatted.contains("    nested: {"));
    assert!(formatted.contains("        value: 123"));
    assert!(formatted.contains("    }"));
}
// --- Edge cases: empty input, blank-line collapsing, idempotence ---
#[test]
fn test_format_empty_document() {
    let input = "";
    let formatted = format_text(input);
    assert_eq!(formatted, "");
}
#[test]
fn test_format_whitespace_only() {
    let input = "   \n  \n  ";
    let formatted = format_text(input);
    // Should collapse to single newline or empty
    assert!(formatted.len() <= 2);
}
#[test]
fn test_format_already_formatted() {
    let input = "character Alice{\n    age: 7\n}";
    let formatted = format_text(input);
    // Should remain essentially the same
    assert!(formatted.contains("age: 7"));
}
#[test]
fn test_format_removes_multiple_blank_lines() {
    let input = "character Alice{age:7}\n\n\n\ncharacter Bob{age:10}";
    let formatted = format_text(input);
    // Should consolidate multiple blank lines
    let double_newlines = formatted.matches("\n\n\n").count();
    assert_eq!(double_newlines, 0, "Should not have triple newlines");
}
#[test]
fn test_format_comments_unchanged() {
    let input = "// Comment\ncharacter Alice{age:7}";
    let formatted = format_text(input);
    assert!(formatted.contains("// Comment"));
}
// --- format_document wrapper: full-document TextEdit semantics ---
#[test]
fn test_format_document_returns_text_edit() {
    let input = "character Alice{age:7}";
    let doc = Document::new(input.to_string());
    let result = format_document(&doc, &FormattingOptions::default());
    if doc.ast.is_some() {
        assert!(
            result.is_some(),
            "Should return TextEdit for valid document"
        );
    }
}
#[test]
fn test_format_document_no_changes_returns_none() {
    let input = "character Alice{\n    age: 7\n}\n";
    let doc = Document::new(input.to_string());
    let result = format_document(&doc, &FormattingOptions::default());
    // If document is already formatted, may return None
    // (depends on exact formatting match)
    if result.is_none() {
        assert!(
            doc.ast.is_some(),
            "Should only return None if formatting matches"
        );
    }
}

636
src/lsp/hover.rs Normal file
View File

@@ -0,0 +1,636 @@
//! Hover information provider
use tower_lsp::lsp_types::{
Hover,
HoverContents,
MarkupContent,
MarkupKind,
};
use crate::{
lsp::document::Document,
resolve::names::DeclKind,
syntax::{
ast::{
Declaration,
Value,
},
lexer::{
Lexer,
Token,
},
},
};
/// Get hover information at a position
///
/// Translates the zero-based (line, character) cursor position into an
/// absolute byte offset, finds the lexer token covering that offset,
/// and returns keyword documentation for it (if any) as Markdown.
/// Returns `None` for positions outside the text or tokens without
/// keyword docs.
pub fn get_hover_info(text: &str, line: usize, character: usize) -> Option<Hover> {
    // Resolve the cursor to an absolute byte offset. `char_indices()
    // .nth(character)` yields None when the requested column lies at or
    // beyond the end of the line, which covers the bounds checks.
    let mut preceding_bytes = 0;
    let mut cursor_offset = None;
    for (row, row_text) in text.lines().enumerate() {
        if row == line {
            cursor_offset = row_text
                .char_indices()
                .nth(character)
                .map(|(byte_pos, _)| preceding_bytes + byte_pos);
            break;
        }
        preceding_bytes += row_text.len() + 1; // +1 for newline
    }
    let byte_offset = cursor_offset?;
    // Scan tokens until one spans the cursor offset.
    let hovered = Lexer::new(text)
        .find(|&(start, _, end)| start <= byte_offset && byte_offset < end)
        .map(|(_, token, _)| token)?;
    let docs = get_token_documentation(&hovered)?;
    Some(Hover {
        contents: HoverContents::Markup(MarkupContent {
            kind: MarkupKind::Markdown,
            value: docs.to_string(),
        }),
        range: None,
    })
}
/// Get documentation for a token
///
/// Returns a static Markdown snippet for the language's keyword
/// tokens; returns `None` for everything else (identifiers, literals,
/// punctuation), which callers treat as "no hover available".
fn get_token_documentation(token: &Token) -> Option<&'static str> {
    match token {
        Token::Character => Some("**character** - Defines a character entity\n\nSyntax: `character Name { ... }`"),
        Token::Template => Some("**template** - Defines a reusable field template\n\nSyntax: `template Name { ... }`"),
        Token::LifeArc => Some("**life_arc** - Defines a state machine for character development\n\nSyntax: `life_arc Name { ... }`"),
        Token::Schedule => Some("**schedule** - Defines a daily schedule or routine\n\nSyntax: `schedule Name { ... }`"),
        Token::Behavior => Some("**behavior** - Defines a behavior tree for AI\n\nSyntax: `behavior Name { ... }`"),
        Token::Institution => Some("**institution** - Defines an organization or group\n\nSyntax: `institution Name { ... }`"),
        Token::Relationship => Some("**relationship** - Defines a multi-party relationship\n\nSyntax: `relationship Name { ... }`"),
        Token::Location => Some("**location** - Defines a place or setting\n\nSyntax: `location Name { ... }`"),
        Token::Species => Some("**species** - Defines a species with templates\n\nSyntax: `species Name { ... }`"),
        Token::Enum => Some("**enum** - Defines an enumeration type\n\nSyntax: `enum Name { ... }`"),
        Token::Use => Some("**use** - Imports declarations from other files\n\nSyntax: `use path::to::item;`"),
        Token::From => Some("**from** - Applies templates to a character\n\nSyntax: `character Name from Template { ... }`"),
        Token::Include => Some("**include** - Includes another template\n\nSyntax: `include TemplateName`"),
        Token::State => Some("**state** - Defines a state in a life arc\n\nSyntax: `state name { ... }`"),
        Token::On => Some("**on** - Defines a transition or enter handler\n\nSyntax: `on condition -> target` or `on enter { ... }`"),
        Token::Strict => Some("**strict** - Enforces that a template only accepts defined fields"),
        _ => None,
    }
}
/// Get semantic hover information for symbols
///
/// Finds the identifier token under the cursor, resolves it through
/// the document's name table, and renders declaration-specific hover
/// content for the matching top-level declaration. Returns `None` when
/// there is no AST, the cursor is not on a known identifier, or no
/// declaration with that name exists.
pub fn get_semantic_hover_info(doc: &Document, line: usize, character: usize) -> Option<Hover> {
    let ast = doc.ast.as_ref()?;
    // Resolve (line, character) to an absolute byte offset;
    // `char_indices().nth()` is None past the end of the line.
    let mut preceding_bytes = 0;
    let mut cursor_offset = None;
    for (row, row_text) in doc.text.lines().enumerate() {
        if row == line {
            cursor_offset = row_text
                .char_indices()
                .nth(character)
                .map(|(byte_pos, _)| preceding_bytes + byte_pos);
            break;
        }
        preceding_bytes += row_text.len() + 1; // +1 for newline
    }
    let byte_offset = cursor_offset?;
    // Only identifier tokens can name declared symbols; any other
    // token under the cursor yields no semantic hover.
    let word = Lexer::new(&doc.text)
        .find(|&(start, _, end)| start <= byte_offset && byte_offset < end)
        .and_then(|(_, token, _)| match token {
            Token::Ident(name) => Some(name),
            _ => None,
        })?;
    // The identifier must resolve through the name table.
    let symbol_info = doc.name_table.lookup(&[word.clone()])?;
    // Render hover content from the first declaration with that name.
    ast.declarations
        .iter()
        .find(|decl| get_declaration_name(decl).as_deref() == Some(word.as_str()))
        .map(|decl| format_declaration_hover(decl, &symbol_info.kind))
}
/// Extract the name from a declaration
fn get_declaration_name(decl: &Declaration) -> Option<String> {
match decl {
| Declaration::Character(c) => Some(c.name.clone()),
| Declaration::Template(t) => Some(t.name.clone()),
| Declaration::Species(s) => Some(s.name.clone()),
| Declaration::Enum(e) => Some(e.name.clone()),
| Declaration::Location(l) => Some(l.name.clone()),
| Declaration::Institution(i) => Some(i.name.clone()),
| Declaration::Relationship(r) => Some(r.name.clone()),
| Declaration::LifeArc(la) => Some(la.name.clone()),
| Declaration::Schedule(s) => Some(s.name.clone()),
| Declaration::Behavior(b) => Some(b.name.clone()),
| Declaration::Use(_) => None,
}
}
/// Format hover information for a declaration
///
/// Dispatches to the per-kind formatter and wraps the resulting
/// Markdown in a `Hover` with no explicit range (the client highlights
/// the hovered word itself).
/// `_kind` is currently unused — the declaration variant alone decides
/// the rendering; it is kept in the signature for future kind-specific
/// output.
fn format_declaration_hover(decl: &Declaration, _kind: &DeclKind) -> Hover {
    let content = match decl {
        | Declaration::Character(c) => format_character_hover(c),
        | Declaration::Template(t) => format_template_hover(t),
        | Declaration::Species(s) => format_species_hover(s),
        | Declaration::Enum(e) => format_enum_hover(e),
        | Declaration::Location(l) => format_location_hover(l),
        | Declaration::Institution(i) => format_institution_hover(i),
        | Declaration::Relationship(r) => format_relationship_hover(r),
        | Declaration::LifeArc(la) => format_life_arc_hover(la),
        | Declaration::Schedule(s) => format_schedule_hover(s),
        | Declaration::Behavior(b) => format_behavior_hover(b),
        | Declaration::Use(_) => "**use** declaration".to_string(),
    };
    Hover {
        contents: HoverContents::Markup(MarkupContent {
            kind: MarkupKind::Markdown,
            value: content,
        }),
        range: None,
    }
}
/// Format character hover information
///
/// Builds a Markdown card: name header, optional species and template
/// list, a bulleted field list with value previews, and a trailing
/// count of prose blocks among the fields.
fn format_character_hover(c: &crate::syntax::ast::Character) -> String {
    let mut content = format!("**character** `{}`\n\n", c.name);
    // Species
    if let Some(ref species) = c.species {
        content.push_str(&format!("**Species:** `{}`\n\n", species));
    }
    // Templates
    if let Some(ref templates) = c.template {
        content.push_str(&format!(
            "**Templates:** {}\n\n",
            templates
                .iter()
                .map(|t| format!("`{}`", t))
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    // Fields
    if !c.fields.is_empty() {
        content.push_str("**Fields:**\n");
        for field in &c.fields {
            let value_preview = format_value_preview(&field.value);
            content.push_str(&format!("- `{}`: {}\n", field.name, value_preview));
        }
        content.push('\n');
    }
    // Prose blocks count (prose fields already appear in the list above;
    // this is just a summary line)
    let prose_count = c
        .fields
        .iter()
        .filter(|f| matches!(f.value, Value::ProseBlock(_)))
        .count();
    if prose_count > 0 {
        content.push_str(&format!("*{} prose block(s)*\n", prose_count));
    }
    content
}
/// Format template hover information
///
/// Builds a Markdown card: name header, optional `*strict mode*` flag,
/// included templates, and fields rendered with their *type* (via
/// `format_value_as_type`) rather than a value preview, since template
/// field values act as type declarations.
fn format_template_hover(t: &crate::syntax::ast::Template) -> String {
    let mut content = format!("**template** `{}`\n\n", t.name);
    if t.strict {
        content.push_str("*strict mode*\n\n");
    }
    // Includes
    if !t.includes.is_empty() {
        content.push_str(&format!(
            "**Includes:** {}\n\n",
            t.includes
                .iter()
                .map(|i| format!("`{}`", i))
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    // Fields with types
    if !t.fields.is_empty() {
        content.push_str("**Fields:**\n");
        for field in &t.fields {
            let type_name = format_value_as_type(&field.value);
            content.push_str(&format!("- `{}`: {}\n", field.name, type_name));
        }
        content.push('\n');
    }
    content
}
/// Format species hover information
///
/// Same layout as template hover (species wrap templates): name
/// header, included templates, and fields rendered as types.
fn format_species_hover(s: &crate::syntax::ast::Species) -> String {
    let mut content = format!("**species** `{}`\n\n", s.name);
    // Includes
    if !s.includes.is_empty() {
        content.push_str(&format!(
            "**Includes:** {}\n\n",
            s.includes
                .iter()
                .map(|i| format!("`{}`", i))
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    // Fields with types
    if !s.fields.is_empty() {
        content.push_str("**Fields:**\n");
        for field in &s.fields {
            let type_name = format_value_as_type(&field.value);
            content.push_str(&format!("- `{}`: {}\n", field.name, type_name));
        }
        content.push('\n');
    }
    content
}
/// Format enum hover information
///
/// Renders the enum name header followed by a bulleted list of its
/// variants; an enum without variants gets just the header.
fn format_enum_hover(e: &crate::syntax::ast::EnumDecl) -> String {
    if e.variants.is_empty() {
        return format!("**enum** `{}`\n\n", e.name);
    }
    let variant_list: String = e
        .variants
        .iter()
        .map(|variant| format!("- `{}`\n", variant))
        .collect();
    format!("**enum** `{}`\n\n**Variants:**\n{}\n", e.name, variant_list)
}
/// Format location hover information
///
/// Renders the location name header and, when present, a bulleted
/// property list with value previews.
fn format_location_hover(l: &crate::syntax::ast::Location) -> String {
    let mut out = format!("**location** `{}`\n\n", l.name);
    if !l.fields.is_empty() {
        out.push_str("**Properties:**\n");
        let rows: String = l
            .fields
            .iter()
            .map(|f| format!("- `{}`: {}\n", f.name, format_value_preview(&f.value)))
            .collect();
        out.push_str(&rows);
        out.push('\n');
    }
    out
}
/// Format institution hover information
///
/// Renders the institution name header and, when present, a bulleted
/// property list with value previews (same layout as location hover).
fn format_institution_hover(i: &crate::syntax::ast::Institution) -> String {
    let mut content = format!("**institution** `{}`\n\n", i.name);
    if !i.fields.is_empty() {
        content.push_str("**Properties:**\n");
        for field in &i.fields {
            let value_preview = format_value_preview(&field.value);
            content.push_str(&format!("- `{}`: {}\n", field.name, value_preview));
        }
        content.push('\n');
    }
    content
}
/// Format relationship hover information
///
/// Renders the relationship name, a comma-separated participant list
/// (each optionally annotated with its role, as `` `name` as role ``),
/// and a bulleted field list with value previews.
fn format_relationship_hover(r: &crate::syntax::ast::Relationship) -> String {
    let mut content = format!("**relationship** `{}`\n\n", r.name);
    // Participants
    if !r.participants.is_empty() {
        content.push_str(&format!(
            "**Participants:** {}\n\n",
            r.participants
                .iter()
                .map(|p| {
                    // Participant names are dotted paths in the AST.
                    let name = p.name.join(".");
                    if let Some(ref role) = p.role {
                        format!("`{}` as {}", name, role)
                    } else {
                        format!("`{}`", name)
                    }
                })
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    // Fields
    if !r.fields.is_empty() {
        content.push_str("**Fields:**\n");
        for field in &r.fields {
            let value_preview = format_value_preview(&field.value);
            content.push_str(&format!("- `{}`: {}\n", field.name, value_preview));
        }
        content.push('\n');
    }
    content
}
/// Format life arc hover information
///
/// Renders the arc name, the total state count, and a preview of up to
/// the first 5 states (each with its transition count); remaining
/// states are summarized as `... and N more` to keep hovers small.
fn format_life_arc_hover(la: &crate::syntax::ast::LifeArc) -> String {
    let mut content = format!("**life_arc** `{}`\n\n", la.name);
    if !la.states.is_empty() {
        content.push_str(&format!("**States:** {} states\n\n", la.states.len()));
        // Show first few states
        let preview_count = 5;
        for state in la.states.iter().take(preview_count) {
            content.push_str(&format!(
                "- `{}` ({} transitions)\n",
                state.name,
                state.transitions.len()
            ));
        }
        if la.states.len() > preview_count {
            content.push_str(&format!(
                "- *... and {} more*\n",
                la.states.len() - preview_count
            ));
        }
        content.push('\n');
    }
    content
}
/// Format schedule hover information
///
/// Renders the schedule name, the number of time blocks, and a preview
/// of up to the first 5 blocks as `HH:MM - HH:MM: activity`; remaining
/// blocks are summarized as `... and N more`.
fn format_schedule_hover(s: &crate::syntax::ast::Schedule) -> String {
    let mut content = format!("**schedule** `{}`\n\n", s.name);
    if !s.blocks.is_empty() {
        content.push_str(&format!("**Time Blocks:** {} entries\n\n", s.blocks.len()));
        // Show first few blocks
        let preview_count = 5;
        for block in s.blocks.iter().take(preview_count) {
            let start_str = format_time(&block.start);
            let end_str = format_time(&block.end);
            content.push_str(&format!(
                "- {} - {}: {}\n",
                start_str, end_str, block.activity
            ));
        }
        if s.blocks.len() > preview_count {
            content.push_str(&format!(
                "- *... and {} more*\n",
                s.blocks.len() - preview_count
            ));
        }
        content.push('\n');
    }
    content
}
/// Format behavior hover information
///
/// Renders the behavior name header followed by a depth-limited
/// preview of its tree (see `format_behavior_node_preview`).
fn format_behavior_hover(b: &crate::syntax::ast::Behavior) -> String {
    format!(
        "**behavior** `{}`\n\n**Behavior Tree:**\n{}\n",
        b.name,
        format_behavior_node_preview(&b.root, 0)
    )
}
/// Format a behavior tree node preview (recursively, up to depth 2)
///
/// Keeps hover content compact: recursion stops with an ellipsis
/// beyond depth 2, and only the first 3 children of a Sequence or
/// Selector are listed (the rest summarized as `... and N more`).
/// Each level is indented by two spaces per depth.
fn format_behavior_node_preview(node: &crate::syntax::ast::BehaviorNode, depth: usize) -> String {
    // Depth cap: replace deeper subtrees with an ellipsis marker.
    if depth > 2 {
        return format!("{} *...*\n", "  ".repeat(depth));
    }
    let indent = "  ".repeat(depth);
    let mut content = String::new();
    match node {
        | crate::syntax::ast::BehaviorNode::Action(name, params) => {
            content.push_str(&format!("{}- Action: `{}`", indent, name));
            if !params.is_empty() {
                content.push_str(&format!(" ({} params)", params.len()));
            }
            content.push('\n');
        },
        | crate::syntax::ast::BehaviorNode::Sequence { children, .. } => {
            content.push_str(&format!(
                "{}- Sequence ({} children)\n",
                indent,
                children.len()
            ));
            // Preview only the first 3 children.
            for child in children.iter().take(3) {
                content.push_str(&format_behavior_node_preview(child, depth + 1));
            }
            if children.len() > 3 {
                content.push_str(&format!(
                    "{}  *... and {} more*\n",
                    indent,
                    children.len() - 3
                ));
            }
        },
        | crate::syntax::ast::BehaviorNode::Selector { children, .. } => {
            content.push_str(&format!(
                "{}- Selector ({} children)\n",
                indent,
                children.len()
            ));
            // Preview only the first 3 children.
            for child in children.iter().take(3) {
                content.push_str(&format_behavior_node_preview(child, depth + 1));
            }
            if children.len() > 3 {
                content.push_str(&format!(
                    "{}  *... and {} more*\n",
                    indent,
                    children.len() - 3
                ));
            }
        },
        | crate::syntax::ast::BehaviorNode::Condition(_) => {
            content.push_str(&format!("{}- Condition\n", indent));
        },
        | crate::syntax::ast::BehaviorNode::Decorator {
            decorator_type,
            child,
            ..
        } => {
            content.push_str(&format!("{}- Decorator: `{:?}`\n", indent, decorator_type));
            content.push_str(&format_behavior_node_preview(child, depth + 1));
        },
        | crate::syntax::ast::BehaviorNode::SubTree(name) => {
            content.push_str(&format!("{}- SubTree: `{}`\n", indent, name.join(".")));
        },
    }
    content
}
/// Format a value as a type name (for template/species fields)
///
/// Template field values act as type declarations, so literals map to
/// their type names (`String`, `Int`, ...). A list's element type is
/// taken from its first item only — heterogeneous lists are not
/// validated here. Identifier paths are rendered dotted as written.
fn format_value_as_type(value: &Value) -> String {
    match value {
        | Value::Identifier(path) => path.join("."),
        | Value::String(_) => "String".to_string(),
        | Value::Int(_) => "Int".to_string(),
        | Value::Float(_) => "Float".to_string(),
        | Value::Bool(_) => "Bool".to_string(),
        | Value::List(items) => {
            if items.is_empty() {
                "List".to_string()
            } else {
                // Element type inferred from the first item.
                format!("[{}]", format_value_as_type(&items[0]))
            }
        },
        | Value::Object(_) => "Object".to_string(),
        | Value::Range(start, end) => {
            format!(
                "{}..{}",
                format_value_as_type(start),
                format_value_as_type(end)
            )
        },
        | Value::Time(_) => "Time".to_string(),
        | Value::Duration(_) => "Duration".to_string(),
        | Value::ProseBlock(_) => "ProseBlock".to_string(),
        | Value::Override(_) => "Override".to_string(),
    }
}
/// Format a value preview (for character/location fields)
///
/// Produces short, human-readable previews for hover field lists:
/// scalars are shown literally, strings are truncated to 50 chars,
/// and containers (lists, objects, prose, overrides) are summarized by
/// size rather than expanded.
fn format_value_preview(value: &Value) -> String {
    match value {
        | Value::Identifier(path) => format!("`{}`", path.join(".")),
        | Value::String(s) => format!("\"{}\"", truncate(s, 50)),
        | Value::Int(n) => n.to_string(),
        | Value::Float(f) => f.to_string(),
        | Value::Bool(b) => b.to_string(),
        | Value::List(items) => {
            if items.is_empty() {
                "[]".to_string()
            } else {
                format!("[{} items]", items.len())
            }
        },
        | Value::Object(fields) => format!("{{{} fields}}", fields.len()),
        | Value::Range(start, end) => {
            format!(
                "{}..{}",
                format_value_preview(start),
                format_value_preview(end)
            )
        },
        | Value::Time(time) => format_time(time),
        | Value::Duration(duration) => format_duration(duration),
        | Value::ProseBlock(prose) => format!("*prose ({} chars)*", prose.content.len()),
        | Value::Override(override_val) => format!("*{} overrides*", override_val.overrides.len()),
    }
}
/// Format a time value
///
/// Renders `HH:MM` with zero-padding, appending `:SS` only when the
/// seconds component is non-zero.
fn format_time(time: &crate::syntax::ast::Time) -> String {
    match time.second {
        0 => format!("{:02}:{:02}", time.hour, time.minute),
        sec => format!("{:02}:{:02}:{:02}", time.hour, time.minute, sec),
    }
}
/// Format a duration value
///
/// Renders non-zero components as space-separated `Nh Nm Ns` segments,
/// omitting zero components; an all-zero duration renders as `0s`.
fn format_duration(duration: &crate::syntax::ast::Duration) -> String {
    let mut segments: Vec<String> = Vec::new();
    if duration.hours > 0 {
        segments.push(format!("{}h", duration.hours));
    }
    if duration.minutes > 0 {
        segments.push(format!("{}m", duration.minutes));
    }
    if duration.seconds > 0 {
        segments.push(format!("{}s", duration.seconds));
    }
    match segments.is_empty() {
        // Every component was zero — still show something meaningful.
        true => "0s".to_string(),
        false => segments.join(" "),
    }
}
/// Truncate a string to a maximum length, appending `...` when cut.
///
/// Operates on `char` counts rather than raw bytes: the original
/// byte-slice version (`&s[..max_len - 3]`) could panic by slicing in
/// the middle of a multi-byte UTF-8 character, and underflowed when
/// `max_len < 3`. Strings of at most `max_len` characters are returned
/// unchanged; longer strings keep `max_len - 3` characters (saturating
/// at zero) followed by `...`.
fn truncate(s: &str, max_len: usize) -> String {
    if s.chars().count() <= max_len {
        return s.to_string();
    }
    // Reserve three characters for the ellipsis, never underflowing.
    let keep = max_len.saturating_sub(3);
    let mut out: String = s.chars().take(keep).collect();
    out.push_str("...");
    out
}

350
src/lsp/hover_tests.rs Normal file
View File

@@ -0,0 +1,350 @@
//! Tests for LSP hover functionality
//!
//! This module tests hover information display for:
//! - Keywords (character, template, behavior, etc.)
//! - Type information for symbols
//! - Documentation display
//! - Edge cases (whitespace, EOF, invalid positions)
use tower_lsp::lsp_types::{
HoverContents,
MarkupContent,
MarkupKind,
};
use super::hover::get_hover_info;
// --- Keyword hovers: each declaration keyword yields Markdown docs ---
#[test]
fn test_hover_on_character_keyword() {
    let source = "character Alice { age: 7 }";
    // Hover over "character" keyword at position 0,5 (middle of "character")
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { kind, value }) => {
            assert_eq!(kind, MarkupKind::Markdown);
            assert!(value.contains("character"));
            assert!(value.contains("Defines a character entity"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_template_keyword() {
    let source = "template Person { name: String }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("template"));
            assert!(value.contains("reusable field template"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_behavior_keyword() {
    let source = "behavior WalkAround { walk_to_garden }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("behavior"));
            assert!(value.contains("behavior tree"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_life_arc_keyword() {
    let source = "life_arc Growth { state child { } }";
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("life_arc"));
            assert!(value.contains("state machine"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_schedule_keyword() {
    let source = "schedule Daily { 08:00-12:00 work }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("schedule"));
            assert!(value.contains("daily schedule"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_institution_keyword() {
    let source = "institution Bakery { location: wonderland }";
    let hover = get_hover_info(source, 0, 6);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("institution"));
            assert!(value.contains("organization"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_relationship_keyword() {
    let source = "relationship Friendship { participants: [Alice, Bob] }";
    let hover = get_hover_info(source, 0, 8);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("relationship"));
            assert!(value.contains("multi-party"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_location_keyword() {
    let source = "location Garden { description: \"A beautiful garden\" }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("location"));
            assert!(value.contains("place"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_species_keyword() {
    let source = "species Human { include CommonTraits }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("species"));
            assert!(value.contains("templates"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_enum_keyword() {
    let source = "enum Emotion { Happy, Sad, Angry }";
    let hover = get_hover_info(source, 0, 2);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("enum"));
            assert!(value.contains("enumeration"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_use_keyword() {
    let source = "use characters::Alice;";
    let hover = get_hover_info(source, 0, 1);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("use"));
            assert!(value.contains("Imports"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_from_keyword() {
    let source = "character Alice from Person { }";
    let hover = get_hover_info(source, 0, 17);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("from"));
            assert!(value.contains("templates"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_on_state_keyword() {
    let source = "life_arc Growth { state child { } }";
    let hover = get_hover_info(source, 0, 19);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("state"));
            assert!(value.contains("life arc"));
        },
        | _ => panic!("Expected markup content"),
    }
}
// --- Negative cases: positions that must NOT produce a hover ---
#[test]
fn test_hover_on_whitespace_returns_none() {
    let source = "character Alice { age: 7 }";
    // Hover over whitespace between "character" and "Alice"
    let hover = get_hover_info(source, 0, 10);
    assert!(hover.is_none(), "Hovering on whitespace should return None");
}
#[test]
fn test_hover_on_unknown_word_returns_none() {
    let source = "character Alice { age: 7 }";
    // Hover over "Alice" (not a keyword)
    let hover = get_hover_info(source, 0, 12);
    assert!(
        hover.is_none(),
        "Hovering on non-keyword should return None"
    );
}
#[test]
fn test_hover_at_eof_returns_none() {
    let source = "character Alice { age: 7 }";
    // Try to hover beyond the line
    let hover = get_hover_info(source, 0, 100);
    assert!(hover.is_none(), "Hovering beyond line should return None");
}
#[test]
fn test_hover_on_invalid_line_returns_none() {
    let source = "character Alice { age: 7 }";
    // Try to hover on a line that doesn't exist
    let hover = get_hover_info(source, 100, 0);
    assert!(
        hover.is_none(),
        "Hovering on invalid line should return None"
    );
}
#[test]
fn test_hover_on_comment_returns_none() {
    let source = "// This is a comment\ncharacter Alice { }";
    // Hover over the comment
    let hover = get_hover_info(source, 0, 5);
    // Comments don't contain keywords, so this should return None
    assert!(hover.is_none(), "Hovering on comment should return None");
}
// --- Position math and output formatting ---
#[test]
fn test_hover_multiline_document() {
    let source = r#"
character Alice { age: 7 }
template Person { name: String }
behavior Walk { walk_around }
"#;
    // Hover on "template" keyword on line 2
    let hover = get_hover_info(source, 2, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("template"));
        },
        | _ => panic!("Expected markup content"),
    }
}
#[test]
fn test_hover_preserves_markdown_formatting() {
    let source = "character Alice { }";
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { kind, value }) => {
            assert_eq!(kind, MarkupKind::Markdown);
            // Check for markdown formatting
            assert!(value.contains("**character**") || value.contains("character"));
            assert!(value.contains("`character Name { ... }`") || value.contains("Syntax"));
        },
        | _ => panic!("Expected markup content"),
    }
}

205
src/lsp/inlay_hints.rs Normal file
View File

@@ -0,0 +1,205 @@
//! Inlay hints for implicit information
//!
//! Provides inline annotations showing:
//! - Parameter names in action calls
//! - Inferred types for field values
//! - Template/species field sources
use tower_lsp::lsp_types::{
InlayHint,
InlayHintKind,
InlayHintLabel,
Position,
};
use super::document::Document;
use crate::syntax::ast::{
Declaration,
Field,
Value,
};
/// Get inlay hints for a document range.
///
/// Walks every top-level declaration in the parsed AST and emits a type
/// hint for each field whose declaration line falls inside the requested
/// `start`..`end` window. Returns `None` when the document has no AST or
/// when no hints apply to the range.
pub fn get_inlay_hints(doc: &Document, start: Position, end: Position) -> Option<Vec<InlayHint>> {
    let ast = doc.ast.as_ref()?;
    let mut hints = Vec::new();
    // The tracker caches as it converts, so work on a local clone.
    let mut positions = doc.positions.clone();
    // Requested line window (inclusive).
    let start_line = start.line as usize;
    let end_line = end.line as usize;
    // Process all declarations. Every field-bearing declaration is handled
    // identically: skip it when it lies entirely outside the window,
    // otherwise hint each field that starts inside the window.
    for decl in &ast.declarations {
        let (span, fields) = match decl {
            | Declaration::Character(character) => (&character.span, &character.fields),
            | Declaration::Template(template) => (&template.span, &template.fields),
            | Declaration::Species(species) => (&species.span, &species.fields),
            | Declaration::Institution(institution) => (&institution.span, &institution.fields),
            | Declaration::Location(location) => (&location.span, &location.fields),
            | Declaration::Relationship(relationship) => {
                (&relationship.span, &relationship.fields)
            },
            | Declaration::Behavior(_) => {
                // TODO: Add parameter name hints for action calls in behavior
                // trees. Would need to traverse the BehaviorNode tree and
                // match actions to schema.
                continue;
            },
            | _ => continue,
        };
        // Skip declarations entirely outside the requested range.
        if span.start_line > end_line || span.end_line < start_line {
            continue;
        }
        for field in fields {
            if field.span.start_line >= start_line && field.span.start_line <= end_line {
                add_type_hint(&mut hints, &mut positions, field);
            }
        }
    }
    if hints.is_empty() {
        None
    } else {
        Some(hints)
    }
}
/// Add a type hint for a field value.
///
/// Appends an `InlayHint` positioned just after the field's value span, but
/// only for value forms whose type is not already obvious from the literal
/// syntax (identifier references, lists, and overrides).
fn add_type_hint(
    hints: &mut Vec<InlayHint>,
    positions: &mut crate::position::PositionTracker,
    field: &Field,
) {
    // Decide first whether a hint is worthwhile, so type inference is only
    // paid for when a hint will actually be emitted (the previous version
    // inferred the type unconditionally).
    let should_hint = match &field.value {
        // The type is clear from the literal (e.g. "string", 123, true).
        | Value::String(_) | Value::Int(_) | Value::Float(_) | Value::Bool(_) => false,
        | Value::Identifier(_) => true,  // Show type for identifier references
        | Value::List(_) => true,        // Show list element type
        | Value::Object(_) => false,     // Object structure is visible
        | Value::Range(_, _) => false,   // Range syntax is clear
        | Value::Time(_) => false,       // Time format is clear
        | Value::Duration(_) => false,   // Duration format is clear
        | Value::ProseBlock(_) => false, // Prose is obvious
        | Value::Override(_) => true,    // Show what's being overridden
    };
    if !should_hint {
        return;
    }
    let type_str = infer_value_type(&field.value);
    // Position the hint at the end of the field value.
    let (line, col) = positions.offset_to_position(field.span.end);
    hints.push(InlayHint {
        position: Position {
            line: line as u32,
            character: col as u32,
        },
        label: InlayHintLabel::String(format!(": {}", type_str)),
        kind: Some(InlayHintKind::TYPE),
        text_edits: None,
        tooltip: None,
        padding_left: Some(true),
        padding_right: None,
        data: None,
    });
}
/// Infer a display string describing the type of a value.
fn infer_value_type(value: &Value) -> String {
    match value {
        | Value::String(_) => "String".to_string(),
        | Value::Int(_) => "Int".to_string(),
        | Value::Float(_) => "Float".to_string(),
        | Value::Bool(_) => "Bool".to_string(),
        | Value::Time(_) => "Time".to_string(),
        | Value::Duration(_) => "Duration".to_string(),
        | Value::ProseBlock(_) => "Prose".to_string(),
        | Value::Object(_) => "Object".to_string(),
        | Value::Override(_) => "Override".to_string(),
        // Identifier references display as their dotted path.
        | Value::Identifier(path) => path.join("."),
        | Value::Range(lo, hi) => {
            format!("{}..{}", infer_value_type(lo), infer_value_type(hi))
        },
        // Lists display their element type, inferred from the first item;
        // an empty list has nothing to infer from.
        | Value::List(items) => match items.first() {
            | None => "[]".to_string(),
            | Some(first) => format!("[{}]", infer_value_type(first)),
        },
    }
}

59
src/lsp/mod.rs Normal file
View File

@@ -0,0 +1,59 @@
//! LSP (Language Server Protocol) implementation for Storybook DSL
//!
//! This module provides language server features including:
//! - Real-time diagnostics (validation errors/warnings)
//! - Hover information (documentation, type info)
//! - Document symbols (outline view)
//! - Go-to-definition
//! - Find references
//! - Autocomplete
//! - Document formatting

// Feature providers — one module per LSP capability.
pub mod code_actions;
pub mod completion;
pub mod definition;
pub mod diagnostics;
pub mod document;
pub mod formatting;
pub mod hover;
pub mod inlay_hints;
pub mod references;
pub mod rename;
pub mod semantic_tokens;
pub mod server;
pub mod symbols;

// Test suites (compiled only under `cargo test`).
#[cfg(test)]
mod behavior_tests;
#[cfg(test)]
mod code_actions_tests;
#[cfg(test)]
mod completion_tests;
#[cfg(test)]
mod diagnostics_tests;
#[cfg(test)]
mod document_edge_tests;
#[cfg(test)]
mod formatting_tests;
#[cfg(test)]
mod hover_tests;
#[cfg(test)]
mod navigation_tests;
#[cfg(test)]
mod parser_test;
#[cfg(test)]
mod tests;
#[cfg(test)]
mod validation_tests;

pub use server::StorybookLanguageServer;

389
src/lsp/navigation_tests.rs Normal file
View File

@@ -0,0 +1,389 @@
//! Tests for navigation features (go-to-definition and find-references)
#[cfg(test)]
mod tests {
    use tower_lsp::lsp_types::{
        GotoDefinitionParams,
        GotoDefinitionResponse,
        Position,
        ReferenceContext,
        ReferenceParams,
        TextDocumentIdentifier,
        TextDocumentPositionParams,
        Url,
    };

    use crate::lsp::{
        definition,
        document::Document,
        references,
    };

    /// URI shared by every test document.
    fn make_uri() -> Url {
        Url::parse("file:///test.sb").unwrap()
    }

    /// Build go-to-definition params for a cursor at (line, character).
    fn goto_params(uri: &Url, line: u32, character: u32) -> GotoDefinitionParams {
        GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position { line, character },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        }
    }

    /// Build find-references params (declaration included) for a cursor at
    /// (line, character).
    fn ref_params(uri: &Url, line: u32, character: u32) -> ReferenceParams {
        ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position { line, character },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        }
    }

    #[test]
    fn test_goto_definition_character() {
        let doc = Document::new("character Alice { age: 7 }".to_string());
        let uri = make_uri();
        // Cursor on "Alice" (at character 10).
        let result = definition::get_definition(&doc, &goto_params(&uri, 0, 10), &uri);
        assert!(result.is_some(), "Should find definition for Alice");
        // Verify it points to the character declaration.
        match result {
            | Some(GotoDefinitionResponse::Scalar(location)) => {
                assert_eq!(location.uri, uri);
                // Should span the whole character declaration
                assert_eq!(location.range.start.line, 0);
            },
            | Some(_) => panic!("Expected scalar location"),
            | None => {},
        }
    }

    #[test]
    fn test_goto_definition_not_found() {
        let doc = Document::new("character Alice { age: 7 }".to_string());
        let uri = make_uri();
        // Cursor on whitespace — nothing to resolve.
        let result = definition::get_definition(&doc, &goto_params(&uri, 0, 0), &uri);
        assert!(result.is_none(), "Should not find definition on whitespace");
    }

    #[test]
    fn test_goto_definition_template() {
        let source = r#"
template Child {
age: number
}
character Alice: Child {}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Child" in the template declaration (line 1).
        let result = definition::get_definition(&doc, &goto_params(&uri, 1, 9), &uri);
        assert!(
            result.is_some(),
            "Should find definition for Child template"
        );
    }

    #[test]
    fn test_find_references_character() {
        let source = r#"
character Alice { age: 7 }
character Bob { friend: Alice }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Alice" in its declaration (line 1).
        let result = references::find_references(&doc, &ref_params(&uri, 1, 10), &uri);
        assert!(result.is_some(), "Should find references to Alice");
        if let Some(locations) = result {
            // Should find at least 2 references: the declaration and the use in Bob's
            // friend field
            assert!(
                locations.len() >= 2,
                "Should find multiple references to Alice, found {}",
                locations.len()
            );
        }
    }

    #[test]
    fn test_find_references_single_occurrence() {
        let doc = Document::new("character Bob { age: 5 }".to_string());
        let uri = make_uri();
        // Cursor on "Bob".
        let result = references::find_references(&doc, &ref_params(&uri, 0, 10), &uri);
        assert!(result.is_some(), "Should find reference to Bob");
        if let Some(locations) = result {
            assert_eq!(locations.len(), 1, "Should find exactly one reference");
        }
    }

    #[test]
    fn test_find_references_not_found() {
        let doc = Document::new("character Alice { age: 7 }".to_string());
        let uri = make_uri();
        // Cursor on punctuation — the opening brace at character 16.
        let result = references::find_references(&doc, &ref_params(&uri, 0, 16), &uri);
        // It's okay if this finds something or nothing - the important thing is it
        // doesn't crash. If word_at_offset returns None, this will be None.
        // If it returns the nearby word, that's also acceptable behavior.
        let _ = result; // Just verify it doesn't panic
    }

    #[test]
    fn test_find_references_word_boundaries() {
        let source = r#"
character Alice { age: 7 }
character Alison { age: 8 }
character Ali { age: 6 }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Alice".
        let result = references::find_references(&doc, &ref_params(&uri, 1, 10), &uri);
        assert!(result.is_some(), "Should find references to Alice");
        if let Some(locations) = result {
            // Should only find "Alice", not "Alison" or "Ali".
            assert_eq!(locations.len(), 1, "Should only find exact match for Alice");
        }
    }

    #[test]
    fn test_find_references_multiple_files_same_name() {
        let source = r#"
template Child { age: number }
character Alice: Child { age: 7 }
character Bob: Child { age: 5 }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Child" in the template declaration.
        let result = references::find_references(&doc, &ref_params(&uri, 1, 9), &uri);
        assert!(result.is_some(), "Should find references to Child template");
        if let Some(locations) = result {
            // Should find declaration + 2 uses (Alice: Child, Bob: Child).
            assert!(
                locations.len() >= 3,
                "Should find template declaration and uses, found {}",
                locations.len()
            );
        }
    }

    #[test]
    fn test_goto_definition_behavior() {
        let source = r#"
behavior WalkAround {
patrol
}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "WalkAround".
        let result = definition::get_definition(&doc, &goto_params(&uri, 1, 9), &uri);
        assert!(
            result.is_some(),
            "Should find definition for WalkAround behavior"
        );
    }

    #[test]
    fn test_find_references_species() {
        let source = r#"
species Human {}
character Alice: Human {}
character Bob: Human {}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Human" in the species declaration.
        let result = references::find_references(&doc, &ref_params(&uri, 1, 8), &uri);
        assert!(result.is_some(), "Should find references to Human species");
        if let Some(locations) = result {
            // Should find declaration + 2 uses.
            assert!(
                locations.len() >= 3,
                "Should find species declaration and character uses, found {}",
                locations.len()
            );
        }
    }

    #[test]
    fn test_goto_definition_multiline() {
        // Don't start with newline to make line numbers clearer
        let source = "character Alice {\n age: 7\n}\n\ncharacter Bob {\n friend: Alice\n}";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Cursor on "Alice" in Bob's friend field (line 5).
        let result = definition::get_definition(&doc, &goto_params(&uri, 5, 12), &uri);
        assert!(result.is_some(), "Should find definition for Alice");
        match result {
            | Some(GotoDefinitionResponse::Scalar(location)) => {
                // Should point to Alice's declaration on line 0.
                assert_eq!(location.range.start.line, 0);
            },
            | Some(_) => panic!("Expected scalar location"),
            | None => {},
        }
    }
}

66
src/lsp/parser_test.rs Normal file
View File

@@ -0,0 +1,66 @@
//! Quick parser test to debug parsing issues
#[cfg(test)]
mod tests {
    use crate::lsp::document::Document;

    #[test]
    fn test_simple_character() {
        let input = "character Alice { age: 7 }";
        let doc = Document::new(input.to_string());
        println!("Parse errors: {:?}", doc.parse_errors);
        let parsed = doc.ast.is_some();
        if parsed {
            println!("SUCCESS: Parsed AST");
        } else {
            println!("FAILED: No AST produced");
        }
        assert!(parsed, "Should parse simple character");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    #[test]
    fn test_species_first() {
        let input = r#"
species Human {
intelligence: high
}
character Alice: Human {
age: 7
}
"#;
        let doc = Document::new(input.to_string());
        // Dump any parse errors to stderr before asserting (a loop over an
        // empty Vec prints nothing, so no emptiness guard is needed).
        for err in &doc.parse_errors {
            eprintln!("Parse error: {}", err.message);
        }
        assert!(doc.ast.is_some(), "Should parse species and character");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    #[test]
    fn test_with_prose() {
        let input = r#"
character Alice {
age: 7
---backstory
A curious girl
---
}
"#;
        let doc = Document::new(input.to_string());
        for err in &doc.parse_errors {
            eprintln!("Parse error: {}", err.message);
        }
        assert!(doc.ast.is_some(), "Should parse character with prose");
    }
}

78
src/lsp/references.rs Normal file
View File

@@ -0,0 +1,78 @@
//! Find references provider
//!
//! Finds all references to a symbol across the document
use tower_lsp::lsp_types::{
Location,
Range,
ReferenceParams,
Url,
};
use super::document::Document;
/// Find all references to a symbol at a position.
///
/// Resolves the word under the cursor, then performs a whole-word textual
/// scan of the document and returns one `Location` per match. Returns
/// `None` when the cursor is not on a word or no matches exist.
pub fn find_references(
    doc: &Document,
    params: &ReferenceParams,
    uri: &Url,
) -> Option<Vec<Location>> {
    let cursor = params.text_document_position.position;
    // LSP position -> byte offset -> word under the cursor.
    let offset = position_to_offset(doc, cursor.line as usize, cursor.character as usize)?;
    let word = doc.word_at_offset(offset)?;
    let mut tracker = doc.positions.clone();
    let mut locations = Vec::new();
    for (match_start, _) in doc.text.match_indices(&word) {
        // Only whole-word matches count as references.
        if !is_word_boundary(&doc.text, match_start, word.len()) {
            continue;
        }
        let (start_line, start_col) = tracker.offset_to_position(match_start);
        let (end_line, end_col) = tracker.offset_to_position(match_start + word.len());
        locations.push(Location {
            uri: uri.clone(),
            range: Range {
                start: tower_lsp::lsp_types::Position {
                    line: start_line as u32,
                    character: start_col as u32,
                },
                end: tower_lsp::lsp_types::Position {
                    line: end_line as u32,
                    character: end_col as u32,
                },
            },
        });
    }
    if locations.is_empty() {
        None
    } else {
        Some(locations)
    }
}
/// Convert an LSP line/character position to a byte offset.
///
/// NOTE(review): `character` is added to the line's start offset without
/// clamping to the line length — assumes callers pass in-range columns;
/// TODO confirm.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    doc.positions.line_offset(line).map(|start| start + character)
}
/// Check whether the match at byte `offset` with byte length `len` is a
/// whole word, i.e. not bordered by identifier characters on either side.
///
/// Both `offset` and `offset + len` come from `str::match_indices`, so they
/// are valid char boundaries; we slice and peek one char in each direction.
/// (The previous implementation passed the *byte* offset to
/// `chars().nth(..)`, which indexes by *char* — misreporting boundaries in
/// any text containing multi-byte UTF-8 — and was O(n) per check.)
fn is_word_boundary(text: &str, offset: usize, len: usize) -> bool {
    let before_ok = text[..offset]
        .chars()
        .next_back()
        .map_or(true, |c| !is_word_char(c));
    let after_ok = text[offset + len..]
        .chars()
        .next()
        .map_or(true, |c| !is_word_char(c));
    before_ok && after_ok
}

/// True for characters that may appear inside an identifier.
fn is_word_char(c: char) -> bool {
    c.is_alphanumeric() || c == '_'
}

399
src/lsp/rename.rs Normal file
View File

@@ -0,0 +1,399 @@
//! Rename refactoring provider
//!
//! Provides semantic, workspace-wide symbol renaming using NameTable
use std::collections::HashMap;
use tower_lsp::lsp_types::{
Position,
Range,
RenameParams,
TextEdit,
Url,
WorkspaceEdit,
};
use super::document::Document;
use crate::{
position::PositionTracker,
resolve::find_all_references,
};
/// Perform a workspace-wide semantic rename operation
pub fn get_rename_edits(
documents: &HashMap<Url, Document>,
params: &RenameParams,
uri: &Url,
) -> Option<WorkspaceEdit> {
let doc = documents.get(uri)?;
let position = params.text_document_position.position;
// Convert LSP position to byte offset
let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
// Get the word at the cursor
let old_name = doc.word_at_offset(offset)?;
// Look up the symbol in the name table - this validates it's a real symbol
let entry = doc.name_table.resolve_name(&old_name)?;
// Get the symbol's kind for semantic matching
let symbol_kind = entry.kind;
// Collect all ASTs and build file_index -> URL mapping
let mut file_asts = Vec::new();
let mut file_url_map: HashMap<usize, Url> = HashMap::new();
let mut url_file_map: HashMap<Url, usize> = HashMap::new();
let mut url_positions_map: HashMap<Url, PositionTracker> = HashMap::new();
for (doc_uri, document) in documents {
if let Some(ref ast) = document.ast {
let file_index = file_asts.len();
file_asts.push(ast.clone());
file_url_map.insert(file_index, doc_uri.clone());
url_file_map.insert(doc_uri.clone(), file_index);
url_positions_map.insert(doc_uri.clone(), document.positions.clone());
}
}
// Find all semantic references using the language layer
let references = find_all_references(&file_asts, &old_name, symbol_kind);
// Convert references to TextEdits grouped by file
let mut all_changes: HashMap<Url, Vec<TextEdit>> = HashMap::new();
for reference in references {
// Get the URL for this file
if let Some(url) = file_url_map.get(&reference.file_index) {
if let Some(mut positions) = url_positions_map.get(url).cloned() {
// Convert byte offsets to line/col positions
let (start_line, start_col) = positions.offset_to_position(reference.span.start);
let (end_line, end_col) = positions.offset_to_position(reference.span.end);
let edit = TextEdit {
range: Range {
start: Position {
line: start_line as u32,
character: start_col as u32,
},
end: Position {
line: end_line as u32,
character: end_col as u32,
},
},
new_text: params.new_name.clone(),
};
all_changes
.entry(url.clone())
.or_default()
.push(edit);
}
}
}
if all_changes.is_empty() {
return None;
}
Some(WorkspaceEdit {
changes: Some(all_changes),
document_changes: None,
change_annotations: None,
})
}
/// Prepare rename — check whether a rename is valid at this position.
///
/// Returns the range of the symbol's definition when the cursor sits on a
/// name known to the name table, `None` otherwise.
pub fn prepare_rename(doc: &Document, position: Position) -> Option<Range> {
    // LSP position -> byte offset -> word under the cursor.
    let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
    let word = doc.word_at_offset(offset)?;
    // Only symbols the name table can resolve may be renamed.
    let entry = doc.name_table.resolve_name(&word)?;
    // Report the symbol's range at its definition.
    let mut tracker = doc.positions.clone();
    let (start_line, start_col) = tracker.offset_to_position(entry.span.start);
    let (end_line, end_col) = tracker.offset_to_position(entry.span.end);
    let to_position = |line: usize, col: usize| Position {
        line: line as u32,
        character: col as u32,
    };
    Some(Range {
        start: to_position(start_line, start_col),
        end: to_position(end_line, end_col),
    })
}
/// Convert an LSP line/character position to a byte offset.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    doc.positions.line_offset(line).map(|start| start + character)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::lsp::document::Document;

    /// Parse each (uri, content) pair into a Document map.
    fn make_documents(files: Vec<(&str, &str)>) -> HashMap<Url, Document> {
        files
            .into_iter()
            .map(|(uri_str, content)| {
                (
                    Url::parse(uri_str).unwrap(),
                    Document::new(content.to_string()),
                )
            })
            .collect()
    }

    /// Build rename params targeting (line, character) in `uri`.
    fn rename_params(uri: &Url, line: u32, character: u32, new_name: &str) -> RenameParams {
        RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position { line, character },
            },
            new_name: new_name.to_string(),
            work_done_progress_params: Default::default(),
        }
    }

    #[test]
    fn test_rename_character_single_file() {
        let source = r#"
character Alice {}
character Bob { friend: Alice }
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let result = get_rename_edits(&documents, &rename_params(&uri, 1, 11, "Alicia"), &uri);
        assert!(result.is_some());
        let changes = result.unwrap().changes.unwrap();
        let edits = changes.get(&uri).unwrap();
        // Two occurrences: the definition and the reference in Bob's field.
        assert_eq!(edits.len(), 2);
        assert!(edits.iter().all(|e| e.new_text == "Alicia"));
    }

    #[test]
    fn test_rename_across_multiple_files() {
        let documents = make_documents(vec![
            ("file:///file1.sb", "character Alice {}"),
            ("file:///file2.sb", "character Bob { friend: Alice }"),
            ("file:///file3.sb", "character Charlie { mentor: Alice }"),
        ]);
        let uri = Url::parse("file:///file1.sb").unwrap();
        let result = get_rename_edits(&documents, &rename_params(&uri, 0, 11, "Alicia"), &uri);
        assert!(result.is_some());
        let changes = result.unwrap().changes.unwrap();
        // Edits should land in all 3 files.
        assert_eq!(changes.len(), 3);
        // Exactly one occurrence per file: the definition in file1 and one
        // reference in each of file2 and file3.
        for file in ["file:///file1.sb", "file:///file2.sb", "file:///file3.sb"] {
            let file_uri = Url::parse(file).unwrap();
            assert_eq!(changes.get(&file_uri).unwrap().len(), 1);
        }
    }

    #[test]
    fn test_rename_template() {
        let source = r#"
template Person {}
character Alice from Person {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let result = get_rename_edits(&documents, &rename_params(&uri, 1, 10, "Human"), &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        assert_eq!(edits.len(), 2); // Definition + usage
    }

    #[test]
    fn test_rename_not_found() {
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", "character Alice {}")]);
        // Cursor on the "character" keyword, which is not a renameable symbol.
        let result = get_rename_edits(&documents, &rename_params(&uri, 0, 0, "NewName"), &uri);
        assert!(result.is_none());
    }

    #[test]
    fn test_prepare_rename_valid() {
        let doc = Document::new("character Alice {}".to_string());
        let range = prepare_rename(
            &doc,
            Position {
                line: 0,
                character: 11,
            },
        );
        assert!(range.is_some());
        let range = range.unwrap();
        assert_eq!(range.start.line, 0);
        assert_eq!(range.end.line, 0);
    }

    #[test]
    fn test_prepare_rename_invalid() {
        let doc = Document::new("character Alice {}".to_string());
        // Cursor on the keyword "character".
        let result = prepare_rename(
            &doc,
            Position {
                line: 0,
                character: 0,
            },
        );
        assert!(result.is_none());
    }

    #[test]
    fn test_rename_respects_symbol_kind() {
        // Renaming the character Alice must not touch unrelated symbols in
        // other contexts, such as the Person template.
        let source = r#"
character Alice {}
template Person {}
character Bob { friend: Alice }
character Charlie from Person {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let result = get_rename_edits(&documents, &rename_params(&uri, 1, 11, "Alicia"), &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        // The character definition plus its field reference only.
        assert_eq!(edits.len(), 2);
    }

    #[test]
    fn test_rename_with_word_boundaries() {
        let source = r#"
character Alice {}
character AliceJr {}
character NotAlice {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let result = get_rename_edits(&documents, &rename_params(&uri, 1, 11, "Alicia"), &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        // Only the exact "Alice" — not "AliceJr" or "NotAlice".
        assert_eq!(edits.len(), 1);
    }
}

536
src/lsp/semantic_tokens.rs Normal file
View File

@@ -0,0 +1,536 @@
//! Semantic tokens for enhanced syntax highlighting
//!
//! Provides detailed token type information beyond what a basic grammar can
//! provide, allowing the editor to highlight different kinds of identifiers,
//! keywords, and values with appropriate semantic meaning.
use tower_lsp::lsp_types::{
SemanticToken,
SemanticTokenType,
SemanticTokens,
SemanticTokensResult,
};
use super::document::Document;
use crate::syntax::{
ast::{
Declaration,
Field,
Value,
},
lexer::{
Lexer,
Token,
},
};
/// Standard semantic token types supported by LSP
///
/// NOTE(review): clients receive token types as numeric indices into this
/// legend, so its order is effectively part of the protocol contract —
/// presumably `token_type_index` resolves by position in this slice (TODO
/// confirm); append new types rather than reordering existing ones.
pub const LEGEND_TYPES: &[SemanticTokenType] = &[
    SemanticTokenType::NAMESPACE,   // use paths
    SemanticTokenType::TYPE,        // template names, species names, enum names
    SemanticTokenType::CLASS,       // character declarations
    SemanticTokenType::ENUM,        // enum declarations
    SemanticTokenType::INTERFACE,   // template declarations
    SemanticTokenType::STRUCT,      // institution, location declarations
    SemanticTokenType::PARAMETER,   // action parameters
    SemanticTokenType::VARIABLE,    // character names in schedules
    SemanticTokenType::PROPERTY,    // field names
    SemanticTokenType::ENUM_MEMBER, // enum variant names
    SemanticTokenType::FUNCTION,    // behavior names
    SemanticTokenType::METHOD,      // relationship names
    SemanticTokenType::KEYWORD,     // keywords like "from", "include", "strict"
    SemanticTokenType::STRING,      // string literals
    SemanticTokenType::NUMBER,      // numeric literals
    SemanticTokenType::OPERATOR,    // operators like "..", "->", etc.
];
/// Semantic token modifiers (currently unused but available for future use)
///
/// Names follow the LSP specification's standard modifier set.
pub const LEGEND_MODIFIERS: &[&str] = &[
    "declaration",
    "definition",
    "readonly",
    "static",
    "deprecated",
    "abstract",
    "async",
    "modification",
    "documentation",
    "defaultLibrary",
];
/// Helper to find identifier positions within a span using the lexer.
///
/// Re-lexes `text[span_start..span_end]` and returns an
/// `(absolute_byte_offset, name)` pair for every identifier token whose
/// name appears in `target_names`. Offsets are relative to the full
/// document text, not the span.
fn find_identifiers_in_span(
    text: &str,
    span_start: usize,
    span_end: usize,
    target_names: &[String],
) -> Vec<(usize, String)> {
    // Stream the lexer output directly rather than collecting every token
    // into an intermediate Vec first (the old version did a needless
    // collect-then-iterate).
    Lexer::new(&text[span_start..span_end])
        .filter_map(|(offset, token, _end)| match token {
            | Token::Ident(name) if target_names.contains(&name) => {
                // Lexer offsets are span-relative; rebase onto the document.
                Some((span_start + offset, name))
            },
            | _ => None,
        })
        .collect()
}
/// Recursively highlight behavior tree nodes.
///
/// Currently only action parameters produce tokens; action names, subtree
/// references, and condition expressions carry no span information yet and
/// are skipped (see the per-arm notes below).
fn highlight_behavior_node(
    builder: &mut SemanticTokensBuilder,
    doc: &Document,
    node: &crate::syntax::ast::BehaviorNode,
) {
    use crate::syntax::ast::BehaviorNode;
    match node {
        | BehaviorNode::Selector { children, .. } | BehaviorNode::Sequence { children, .. } => {
            for child in children {
                highlight_behavior_node(builder, doc, child);
            }
        },
        // Action names don't have spans, so we'd need to search for them.
        // For now, just highlight the parameters. The underscore-prefixed
        // binding suppresses the unused-variable warning idiomatically.
        | BehaviorNode::Action(_action_name, params) => {
            for param in params {
                highlight_field(builder, param);
            }
        },
        | BehaviorNode::Decorator { child, .. } => {
            highlight_behavior_node(builder, doc, child);
        },
        | BehaviorNode::SubTree(_path) => {
            // SubTree references another behavior by path.
            // Would need position tracking to highlight.
        },
        | BehaviorNode::Condition(_expr) => {
            // Conditions contain expressions which could be highlighted.
            // Would need expression traversal.
        },
    }
}
/// Generate semantic tokens for a document.
///
/// Walks every top-level declaration in the parsed AST and emits one token per
/// highlightable name: declaration names, referenced identifiers (templates,
/// species, includes, participants), and fields. Returns `None` when the
/// document has no successfully parsed AST.
pub fn get_semantic_tokens(doc: &Document) -> Option<SemanticTokensResult> {
    let ast = doc.ast.as_ref()?;
    let mut builder = SemanticTokensBuilder::new(&doc.text);
    // Cloned because offset_to_position needs `&mut self` (it keeps an
    // internal cache) while `doc` is only borrowed immutably here.
    let mut positions = doc.positions.clone();
    // Process all top-level declarations
    for decl in &ast.declarations {
        match decl {
            | Declaration::Use(use_decl) => {
                // Highlight use paths as namespaces
                let path_positions = find_identifiers_in_span(
                    &doc.text,
                    use_decl.span.start,
                    use_decl.span.end,
                    &use_decl.path,
                );
                for (offset, segment) in path_positions {
                    let (line, col) = positions.offset_to_position(offset);
                    builder.add_token(
                        line,
                        col,
                        segment.len(),
                        token_type_index(SemanticTokenType::NAMESPACE),
                        0,
                    );
                }
            },
            | Declaration::Character(character) => {
                // Highlight character name as CLASS
                builder.add_token(
                    character.span.start_line,
                    character.span.start_col,
                    character.name.len(),
                    token_type_index(SemanticTokenType::CLASS),
                    0,
                );
                // Highlight species as TYPE
                if let Some(ref species) = character.species {
                    let species_positions = find_identifiers_in_span(
                        &doc.text,
                        character.span.start,
                        character.span.end,
                        &[species.clone()],
                    );
                    for (offset, species_name) in species_positions {
                        let (line, col) = positions.offset_to_position(offset);
                        builder.add_token(
                            line,
                            col,
                            species_name.len(),
                            token_type_index(SemanticTokenType::TYPE),
                            0,
                        );
                    }
                }
                // Highlight template references
                if let Some(ref templates) = character.template {
                    let template_positions = find_identifiers_in_span(
                        &doc.text,
                        character.span.start,
                        character.span.end,
                        templates,
                    );
                    for (offset, template_name) in template_positions {
                        let (line, col) = positions.offset_to_position(offset);
                        builder.add_token(
                            line,
                            col,
                            template_name.len(),
                            token_type_index(SemanticTokenType::INTERFACE),
                            0,
                        );
                    }
                }
                // Highlight fields
                for field in &character.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::Template(template) => {
                // Highlight template name as INTERFACE
                builder.add_token(
                    template.span.start_line,
                    template.span.start_col,
                    template.name.len(),
                    token_type_index(SemanticTokenType::INTERFACE),
                    0,
                );
                // Find and highlight includes using the lexer
                let include_positions = find_identifiers_in_span(
                    &doc.text,
                    template.span.start,
                    template.span.end,
                    &template.includes,
                );
                for (offset, include_name) in include_positions {
                    let (line, col) = positions.offset_to_position(offset);
                    builder.add_token(
                        line,
                        col,
                        include_name.len(),
                        token_type_index(SemanticTokenType::INTERFACE),
                        0,
                    );
                }
                // Highlight fields
                for field in &template.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::Species(species) => {
                // Highlight species name as TYPE
                builder.add_token(
                    species.span.start_line,
                    species.span.start_col,
                    species.name.len(),
                    token_type_index(SemanticTokenType::TYPE),
                    0,
                );
                // Highlight fields
                for field in &species.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::Enum(enum_decl) => {
                // Highlight enum name as ENUM
                builder.add_token(
                    enum_decl.span.start_line,
                    enum_decl.span.start_col,
                    enum_decl.name.len(),
                    token_type_index(SemanticTokenType::ENUM),
                    0,
                );
                // Find and highlight enum variants using the lexer
                let variant_positions = find_identifiers_in_span(
                    &doc.text,
                    enum_decl.span.start,
                    enum_decl.span.end,
                    &enum_decl.variants,
                );
                for (offset, variant_name) in variant_positions {
                    let (line, col) = positions.offset_to_position(offset);
                    builder.add_token(
                        line,
                        col,
                        variant_name.len(),
                        token_type_index(SemanticTokenType::ENUM_MEMBER),
                        0,
                    );
                }
            },
            | Declaration::Institution(institution) => {
                // Highlight institution name as STRUCT
                builder.add_token(
                    institution.span.start_line,
                    institution.span.start_col,
                    institution.name.len(),
                    token_type_index(SemanticTokenType::STRUCT),
                    0,
                );
                // Highlight fields
                for field in &institution.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::Location(location) => {
                // Highlight location name as STRUCT
                builder.add_token(
                    location.span.start_line,
                    location.span.start_col,
                    location.name.len(),
                    token_type_index(SemanticTokenType::STRUCT),
                    0,
                );
                // Highlight fields
                for field in &location.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::Behavior(behavior) => {
                // Highlight behavior name as FUNCTION
                builder.add_token(
                    behavior.span.start_line,
                    behavior.span.start_col,
                    behavior.name.len(),
                    token_type_index(SemanticTokenType::FUNCTION),
                    0,
                );
                // Recursively highlight the behavior tree (parameters inside
                // actions; conditions/subtrees lack position info for now).
                highlight_behavior_node(&mut builder, doc, &behavior.root);
            },
            | Declaration::Relationship(relationship) => {
                // Highlight relationship name as METHOD
                builder.add_token(
                    relationship.span.start_line,
                    relationship.span.start_col,
                    relationship.name.len(),
                    token_type_index(SemanticTokenType::METHOD),
                    0,
                );
                // Highlight participants
                for participant in &relationship.participants {
                    // For qualified paths like "Alice.parent", we want to highlight each segment
                    // The participant has its own span, so we can search within it
                    let participant_names = participant.name.clone();
                    let name_positions = find_identifiers_in_span(
                        &doc.text,
                        participant.span.start,
                        participant.span.end,
                        &participant_names,
                    );
                    for (offset, name) in name_positions {
                        let (line, col) = positions.offset_to_position(offset);
                        builder.add_token(
                            line,
                            col,
                            name.len(),
                            token_type_index(SemanticTokenType::VARIABLE),
                            0,
                        );
                    }
                }
                // Highlight fields
                for field in &relationship.fields {
                    highlight_field(&mut builder, field);
                }
            },
            | Declaration::LifeArc(life_arc) => {
                // Highlight life_arc name as TYPE
                builder.add_token(
                    life_arc.span.start_line,
                    life_arc.span.start_col,
                    life_arc.name.len(),
                    token_type_index(SemanticTokenType::TYPE),
                    0,
                );
                // Highlight states and transitions
                for state in &life_arc.states {
                    // State name as ENUM_MEMBER
                    builder.add_token(
                        state.span.start_line,
                        state.span.start_col,
                        state.name.len(),
                        token_type_index(SemanticTokenType::ENUM_MEMBER),
                        0,
                    );
                    // State fields
                    if let Some(ref fields) = state.on_enter {
                        for field in fields {
                            highlight_field(&mut builder, field);
                        }
                    }
                }
            },
            | Declaration::Schedule(schedule) => {
                // Highlight schedule name as TYPE
                builder.add_token(
                    schedule.span.start_line,
                    schedule.span.start_col,
                    schedule.name.len(),
                    token_type_index(SemanticTokenType::TYPE),
                    0,
                );
                // Highlight block fields
                for block in &schedule.blocks {
                    for field in &block.fields {
                        highlight_field(&mut builder, field);
                    }
                }
            },
        }
    }
    Some(SemanticTokensResult::Tokens(SemanticTokens {
        result_id: None,
        data: builder.build(),
    }))
}
/// Emit semantic tokens for one field: the field name is tagged as a
/// PROPERTY, then the field's value is highlighted recursively.
fn highlight_field(builder: &mut SemanticTokensBuilder, field: &Field) {
    let property = token_type_index(SemanticTokenType::PROPERTY);
    builder.add_token(
        field.span.start_line,
        field.span.start_col,
        field.name.len(),
        property,
        0,
    );
    highlight_value(builder, &field.value);
}
/// Recursively highlight a value. Only container values (lists, objects,
/// ranges) contribute tokens from here; literal leaves are already colored
/// by the grammar layer, and identifier/override values would need position
/// tracking that is not available yet.
fn highlight_value(builder: &mut SemanticTokensBuilder, value: &Value) {
    match value {
        | Value::List(items) => {
            for item in items {
                highlight_value(builder, item);
            }
        },
        | Value::Object(fields) => {
            for field in fields {
                highlight_field(builder, field);
            }
        },
        | Value::Range(start, end) => {
            highlight_value(builder, start);
            highlight_value(builder, end);
        },
        // Leaves: nothing to emit here (see doc comment above).
        | Value::String(_)
        | Value::Int(_)
        | Value::Float(_)
        | Value::Bool(_)
        | Value::Identifier(_)
        | Value::ProseBlock(_)
        | Value::Override(_)
        | Value::Time(_)
        | Value::Duration(_) => {},
    }
}
/// Look up the legend index of `token_type`; unknown types fall back to
/// index 0 so a token is always emitted with *some* legend entry.
fn token_type_index(token_type: SemanticTokenType) -> u32 {
    let index = LEGEND_TYPES
        .iter()
        .position(|candidate| *candidate == token_type)
        .unwrap_or(0);
    index as u32
}
/// Builder for semantic tokens with proper delta encoding
struct SemanticTokensBuilder {
    /// Absolute-position tokens collected via `add_token`; converted to the
    /// LSP delta encoding by `build`.
    tokens: Vec<(usize, usize, usize, u32, u32)>, // (line, col, length, type, modifiers)
}
impl SemanticTokensBuilder {
fn new(_text: &str) -> Self {
Self { tokens: Vec::new() }
}
fn add_token(
&mut self,
line: usize,
col: usize,
length: usize,
token_type: u32,
modifiers: u32,
) {
self.tokens.push((line, col, length, token_type, modifiers));
}
fn build(mut self) -> Vec<SemanticToken> {
// Sort tokens by position (line, then column)
self.tokens
.sort_by_key(|(line, col, _, _, _)| (*line, *col));
// Convert to delta-encoded format required by LSP
let mut result = Vec::new();
let mut prev_line = 0;
let mut prev_col = 0;
for (line, col, length, token_type, modifiers) in self.tokens {
let delta_line = line - prev_line;
let delta_start = if delta_line == 0 { col - prev_col } else { col };
result.push(SemanticToken {
delta_line: delta_line as u32,
delta_start: delta_start as u32,
length: length as u32,
token_type,
token_modifiers_bitset: modifiers,
});
prev_line = line;
prev_col = col;
}
result
}
}

472
src/lsp/server.rs Normal file
View File

@@ -0,0 +1,472 @@
//! Main LSP server implementation with full feature support
use std::{
collections::HashMap,
sync::Arc,
};
use tokio::sync::RwLock;
use tower_lsp::{
jsonrpc::Result,
lsp_types::{
CodeActionParams,
CodeActionResponse,
CompletionOptions,
CompletionParams,
CompletionResponse,
Diagnostic,
DiagnosticSeverity,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams,
DocumentFormattingParams,
DocumentSymbolParams,
DocumentSymbolResponse,
GotoDefinitionParams,
GotoDefinitionResponse,
Hover,
HoverParams,
HoverProviderCapability,
InitializeParams,
InitializeResult,
InitializedParams,
InlayHint,
InlayHintParams,
Location,
MessageType,
OneOf,
Position,
Range,
ReferenceParams,
RenameParams,
SemanticTokensFullOptions,
SemanticTokensLegend,
SemanticTokensOptions,
SemanticTokensParams,
SemanticTokensResult,
SemanticTokensServerCapabilities,
ServerCapabilities,
ServerInfo,
TextDocumentSyncCapability,
TextDocumentSyncKind,
TextEdit,
Url,
WorkDoneProgressOptions,
WorkspaceEdit,
},
Client,
LanguageServer,
};
use super::{
code_actions,
completion,
definition,
document::{
Document,
ErrorSeverity,
},
formatting,
hover,
inlay_hints,
references,
rename,
semantic_tokens,
symbols,
};
use crate::resolve::names::NameTable;
/// Workspace-level state tracking all documents and cross-file references.
/// Rebuilt wholesale by `rebuild` whenever any document opens, changes, or
/// closes.
#[derive(Debug)]
struct WorkspaceState {
    /// Combined name table from all open documents
    name_table: NameTable,
    /// Mapping from file index to URL
    file_urls: HashMap<usize, Url>,
}
impl WorkspaceState {
fn new() -> Self {
Self {
name_table: NameTable::new(),
file_urls: HashMap::new(),
}
}
/// Rebuild the workspace name table from all documents
fn rebuild(&mut self, documents: &HashMap<Url, Document>) {
self.name_table = NameTable::new();
self.file_urls.clear();
// Build name table from all parsed documents
for (file_index, (url, doc)) in documents.iter().enumerate() {
self.file_urls.insert(file_index, url.clone());
if let Some(ref ast) = doc.ast {
// Build name table for this file
// TODO: Properly merge file name tables with file_index
// For now, we'll use the simple single-file approach
let _ = (ast, file_index); // Use variables to avoid warnings
}
}
}
}
/// The main language server instance
pub struct StorybookLanguageServer {
    /// Handle for pushing notifications (diagnostics, log messages) to the editor.
    client: Client,
    /// All open documents keyed by URI; shared across concurrent async handlers.
    documents: Arc<RwLock<HashMap<Url, Document>>>,
    /// Cross-file workspace state, rebuilt whenever any document changes.
    workspace: Arc<RwLock<WorkspaceState>>,
}
impl StorybookLanguageServer {
    /// Construct the server with empty document and workspace state.
    pub fn new(client: Client) -> Self {
        Self {
            client,
            documents: Arc::new(RwLock::new(HashMap::new())),
            workspace: Arc::new(RwLock::new(WorkspaceState::new())),
        }
    }

    /// Rebuild workspace state after document changes.
    /// Takes the document read lock and the workspace write lock in that order.
    async fn rebuild_workspace(&self) {
        let documents = self.documents.read().await;
        let mut workspace = self.workspace.write().await;
        workspace.rebuild(&documents);
    }

    /// Publish diagnostics for a document.
    /// Combines parse errors (with precise spans) and resolve errors
    /// (currently pinned to the start of the document).
    async fn publish_diagnostics(&self, uri: &Url, doc: &Document) {
        // Cloned because offset_to_position needs &mut (internal cache).
        let mut positions = doc.positions.clone();
        let mut diagnostics = Vec::new();
        // Add parse errors
        for error in &doc.parse_errors {
            let (start_line, start_col) = positions.offset_to_position(error.start);
            let (end_line, end_col) = positions.offset_to_position(error.end);
            diagnostics.push(Diagnostic {
                range: Range {
                    start: Position {
                        line: start_line as u32,
                        character: start_col as u32,
                    },
                    end: Position {
                        line: end_line as u32,
                        character: end_col as u32,
                    },
                },
                severity: Some(match error.severity {
                    | ErrorSeverity::Error => DiagnosticSeverity::ERROR,
                    | ErrorSeverity::Warning => DiagnosticSeverity::WARNING,
                }),
                code: None,
                source: Some("storybook-parser".to_string()),
                message: error.message.clone(),
                related_information: None,
                tags: None,
                code_description: None,
                data: None,
            });
        }
        // Add semantic validation errors
        for error in &doc.resolve_errors {
            // For now, show resolve errors at the start of the document
            // TODO: Extract proper position information from ResolveError spans
            diagnostics.push(Diagnostic {
                range: Range {
                    start: Position {
                        line: 0,
                        character: 0,
                    },
                    end: Position {
                        line: 0,
                        character: 1,
                    },
                },
                severity: Some(DiagnosticSeverity::ERROR),
                code: None,
                source: Some("storybook-validator".to_string()),
                message: format!("{}", error),
                related_information: None,
                tags: None,
                code_description: None,
                data: None,
            });
        }
        self.client
            .publish_diagnostics(uri.clone(), diagnostics, None)
            .await;
    }
}
#[tower_lsp::async_trait]
impl LanguageServer for StorybookLanguageServer {
    /// Advertise server capabilities: full-text sync, hover, completion
    /// (triggered on `.`, `:`, `@`), symbols, definition, references,
    /// formatting, rename (with prepare), code actions, semantic tokens,
    /// and inlay hints.
    async fn initialize(&self, _params: InitializeParams) -> Result<InitializeResult> {
        Ok(InitializeResult {
            capabilities: ServerCapabilities {
                text_document_sync: Some(TextDocumentSyncCapability::Kind(
                    TextDocumentSyncKind::FULL,
                )),
                hover_provider: Some(HoverProviderCapability::Simple(true)),
                completion_provider: Some(CompletionOptions {
                    trigger_characters: Some(vec![
                        ".".to_string(),
                        ":".to_string(),
                        "@".to_string(),
                    ]),
                    ..Default::default()
                }),
                document_symbol_provider: Some(OneOf::Left(true)),
                definition_provider: Some(OneOf::Left(true)),
                references_provider: Some(OneOf::Left(true)),
                document_formatting_provider: Some(OneOf::Left(true)),
                rename_provider: Some(OneOf::Right(tower_lsp::lsp_types::RenameOptions {
                    prepare_provider: Some(true),
                    work_done_progress_options: Default::default(),
                })),
                code_action_provider: Some(
                    tower_lsp::lsp_types::CodeActionProviderCapability::Simple(true),
                ),
                semantic_tokens_provider: Some(
                    SemanticTokensServerCapabilities::SemanticTokensOptions(
                        SemanticTokensOptions {
                            work_done_progress_options: WorkDoneProgressOptions::default(),
                            legend: SemanticTokensLegend {
                                token_types: semantic_tokens::LEGEND_TYPES.to_vec(),
                                token_modifiers: semantic_tokens::LEGEND_MODIFIERS
                                    .iter()
                                    .map(|s| tower_lsp::lsp_types::SemanticTokenModifier::new(s))
                                    .collect(),
                            },
                            range: Some(false),
                            full: Some(SemanticTokensFullOptions::Bool(true)),
                        },
                    ),
                ),
                inlay_hint_provider: Some(OneOf::Left(true)),
                ..Default::default()
            },
            server_info: Some(ServerInfo {
                name: "storybook-lsp".to_string(),
                version: Some(env!("CARGO_PKG_VERSION").to_string()),
            }),
        })
    }

    async fn initialized(&self, _params: InitializedParams) {
        self.client
            .log_message(MessageType::INFO, "Storybook LSP server initialized! 🎉")
            .await;
    }

    async fn shutdown(&self) -> Result<()> {
        Ok(())
    }

    /// Parse a newly opened document, publish its diagnostics, and cache it.
    async fn did_open(&self, params: DidOpenTextDocumentParams) {
        let uri = params.text_document.uri;
        let text = params.text_document.text;
        // Parse and store document
        let doc = Document::new(text);
        // Publish diagnostics
        self.publish_diagnostics(&uri, &doc).await;
        // Store document
        self.documents.write().await.insert(uri, doc);
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }

    /// Re-parse a changed document and refresh diagnostics.
    async fn did_change(&self, params: DidChangeTextDocumentParams) {
        let uri = params.text_document.uri;
        // With TextDocumentSyncKind::FULL every change event carries the
        // entire new document text, so the LAST event in the batch is the
        // authoritative final state. (Previously this took the *first*
        // event, leaving the document stale when a client batched changes.)
        let mut changes = params.content_changes;
        if let Some(change) = changes.pop() {
            let text = change.text;
            // Update document
            let mut documents = self.documents.write().await;
            if let Some(doc) = documents.get_mut(&uri) {
                doc.update(text);
                self.publish_diagnostics(&uri, doc).await;
            }
            drop(documents); // Release lock before rebuilding
        }
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }

    /// Drop a closed document from the cache and refresh workspace state.
    async fn did_close(&self, params: DidCloseTextDocumentParams) {
        // Remove document from cache
        self.documents
            .write()
            .await
            .remove(&params.text_document.uri);
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }

    /// Hover: keyword documentation first, then symbol (semantic) hover.
    async fn hover(&self, params: HoverParams) -> Result<Option<Hover>> {
        let uri = params.text_document_position_params.text_document.uri;
        let position = params.text_document_position_params.position;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            // Try keyword hover first
            if let Some(hover) = hover::get_hover_info(
                &doc.text,
                position.line as usize,
                position.character as usize,
            ) {
                return Ok(Some(hover));
            }
            // Try semantic hover (symbols)
            if let Some(hover) = hover::get_semantic_hover_info(
                doc,
                position.line as usize,
                position.character as usize,
            ) {
                return Ok(Some(hover));
            }
        }
        Ok(None)
    }

    async fn completion(&self, params: CompletionParams) -> Result<Option<CompletionResponse>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(completion::get_completions(doc, &params));
        }
        Ok(None)
    }

    /// Outline view: nested symbols extracted from the AST (requires a parse).
    async fn document_symbol(
        &self,
        params: DocumentSymbolParams,
    ) -> Result<Option<DocumentSymbolResponse>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            if let Some(ref ast) = doc.ast {
                let mut positions = doc.positions.clone();
                let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
                return Ok(Some(DocumentSymbolResponse::Nested(symbols)));
            }
        }
        Ok(None)
    }

    async fn goto_definition(
        &self,
        params: GotoDefinitionParams,
    ) -> Result<Option<GotoDefinitionResponse>> {
        let uri = params
            .text_document_position_params
            .text_document
            .uri
            .clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(definition::get_definition(doc, &params, &uri));
        }
        Ok(None)
    }

    async fn references(&self, params: ReferenceParams) -> Result<Option<Vec<Location>>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(references::find_references(doc, &params, &uri));
        }
        Ok(None)
    }

    async fn formatting(&self, params: DocumentFormattingParams) -> Result<Option<Vec<TextEdit>>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(formatting::format_document(doc, &params.options));
        }
        Ok(None)
    }

    /// Validate that the symbol under the cursor is renameable and return
    /// the exact range the client should pre-select.
    async fn prepare_rename(
        &self,
        params: tower_lsp::lsp_types::TextDocumentPositionParams,
    ) -> Result<Option<tower_lsp::lsp_types::PrepareRenameResponse>> {
        let uri = params.text_document.uri;
        let position = params.position;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            if let Some(range) = rename::prepare_rename(doc, position) {
                return Ok(Some(tower_lsp::lsp_types::PrepareRenameResponse::Range(
                    range,
                )));
            }
        }
        Ok(None)
    }

    async fn rename(&self, params: RenameParams) -> Result<Option<WorkspaceEdit>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        // Pass all documents for workspace-wide rename
        Ok(rename::get_rename_edits(&documents, &params, &uri))
    }

    async fn code_action(&self, params: CodeActionParams) -> Result<Option<CodeActionResponse>> {
        let documents = self.documents.read().await;
        // Generate code actions for the given position
        Ok(code_actions::get_code_actions(&documents, &params))
    }

    async fn semantic_tokens_full(
        &self,
        params: SemanticTokensParams,
    ) -> Result<Option<SemanticTokensResult>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            Ok(semantic_tokens::get_semantic_tokens(doc))
        } else {
            Ok(None)
        }
    }

    async fn inlay_hint(&self, params: InlayHintParams) -> Result<Option<Vec<InlayHint>>> {
        let uri = params.text_document.uri;
        let range = params.range;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            Ok(inlay_hints::get_inlay_hints(doc, range.start, range.end))
        } else {
            Ok(None)
        }
    }
}

324
src/lsp/symbols.rs Normal file
View File

@@ -0,0 +1,324 @@
//! Document symbol extraction for outline view
use tower_lsp::lsp_types::{
DocumentSymbol,
Position,
Range,
SymbolKind,
};
use crate::{
position::PositionTracker,
syntax::ast::*,
};
/// Extract document symbols from AST.
///
/// Produces one outline entry per top-level declaration; `use` statements
/// contribute nothing and are skipped.
pub fn extract_symbols_from_ast(
    ast: &File,
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    ast.declarations
        .iter()
        .filter_map(|decl| extract_declaration_symbol(decl, positions))
        .collect()
}
/// Extract a symbol from a declaration.
///
/// Maps each declaration kind to an LSP `SymbolKind` and gathers its child
/// symbols (fields, states, or schedule blocks). Returns `None` for `use`
/// declarations, which do not appear in the outline.
#[allow(deprecated)]
fn extract_declaration_symbol(
    decl: &Declaration,
    positions: &mut PositionTracker,
) -> Option<DocumentSymbol> {
    let (name, kind, span, children) = match decl {
        | Declaration::Character(c) => (
            c.name.clone(),
            SymbolKind::CLASS,
            c.span.clone(),
            extract_field_symbols(&c.fields, positions),
        ),
        | Declaration::Template(t) => (
            t.name.clone(),
            SymbolKind::INTERFACE,
            t.span.clone(),
            extract_field_symbols(&t.fields, positions),
        ),
        | Declaration::LifeArc(l) => (
            l.name.clone(),
            SymbolKind::FUNCTION,
            l.span.clone(),
            extract_state_symbols(&l.states, positions),
        ),
        | Declaration::Schedule(s) => (
            s.name.clone(),
            SymbolKind::EVENT,
            s.span.clone(),
            extract_block_symbols(&s.blocks, positions),
        ),
        | Declaration::Behavior(b) => (
            b.name.clone(),
            SymbolKind::MODULE,
            b.span.clone(),
            Vec::new(), // Behavior tree structure is too complex for symbol tree
        ),
        | Declaration::Institution(i) => (
            i.name.clone(),
            SymbolKind::MODULE,
            i.span.clone(),
            extract_field_symbols(&i.fields, positions),
        ),
        | Declaration::Relationship(r) => (
            r.name.clone(),
            SymbolKind::STRUCT,
            r.span.clone(),
            extract_field_symbols(&r.fields, positions),
        ),
        | Declaration::Location(l) => (
            l.name.clone(),
            SymbolKind::CONSTANT,
            l.span.clone(),
            extract_field_symbols(&l.fields, positions),
        ),
        | Declaration::Species(s) => (
            s.name.clone(),
            SymbolKind::CLASS,
            s.span.clone(),
            extract_field_symbols(&s.fields, positions),
        ),
        | Declaration::Enum(e) => (
            e.name.clone(),
            SymbolKind::ENUM,
            e.span.clone(),
            extract_variant_symbols(&e.variants, positions),
        ),
        | Declaration::Use(_) => return None, // Use statements don't create symbols
    };
    let (start_line, start_col) = positions.offset_to_position(span.start);
    let (end_line, end_col) = positions.offset_to_position(span.end);
    // Selection range is just the name.
    // NOTE(review): this assumes the declaration name starts exactly at
    // span.start; if the span begins at the keyword (e.g. "character"), the
    // selection range covers the keyword instead of the name — TODO confirm.
    let name_end_offset = span.start + name.len();
    let (name_end_line, name_end_col) = positions.offset_to_position(name_end_offset);
    Some(DocumentSymbol {
        name: name.clone(),
        detail: None,
        kind,
        tags: None,
        deprecated: None,
        range: Range {
            start: Position {
                line: start_line as u32,
                character: start_col as u32,
            },
            end: Position {
                line: end_line as u32,
                character: end_col as u32,
            },
        },
        selection_range: Range {
            start: Position {
                line: start_line as u32,
                character: start_col as u32,
            },
            end: Position {
                line: name_end_line as u32,
                character: name_end_col as u32,
            },
        },
        children: if children.is_empty() {
            None
        } else {
            Some(children)
        },
    })
}
/// Extract symbols from field declarations.
///
/// Each field becomes a FIELD symbol whose selection range spans just the
/// field name (assuming the name starts at the field's span start).
#[allow(deprecated)]
fn extract_field_symbols(fields: &[Field], positions: &mut PositionTracker) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(fields.len());
    for field in fields {
        let (start_line, start_col) = positions.offset_to_position(field.span.start);
        let (end_line, end_col) = positions.offset_to_position(field.span.end);
        let name_end = field.span.start + field.name.len();
        let (name_end_line, name_end_col) = positions.offset_to_position(name_end);
        let at = |line: usize, character: usize| Position {
            line: line as u32,
            character: character as u32,
        };
        symbols.push(DocumentSymbol {
            name: field.name.clone(),
            detail: None,
            kind: SymbolKind::FIELD,
            tags: None,
            deprecated: None,
            range: Range {
                start: at(start_line, start_col),
                end: at(end_line, end_col),
            },
            selection_range: Range {
                start: at(start_line, start_col),
                end: at(name_end_line, name_end_col),
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from life arc states.
///
/// Each state becomes a PROPERTY symbol with the literal detail "state".
#[allow(deprecated)]
fn extract_state_symbols(
    states: &[ArcState],
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(states.len());
    for state in states {
        let (start_line, start_col) = positions.offset_to_position(state.span.start);
        let (end_line, end_col) = positions.offset_to_position(state.span.end);
        let name_end = state.span.start + state.name.len();
        let (name_end_line, name_end_col) = positions.offset_to_position(name_end);
        let at = |line: usize, character: usize| Position {
            line: line as u32,
            character: character as u32,
        };
        symbols.push(DocumentSymbol {
            name: state.name.clone(),
            detail: Some("state".to_string()),
            kind: SymbolKind::PROPERTY,
            tags: None,
            deprecated: None,
            range: Range {
                start: at(start_line, start_col),
                end: at(end_line, end_col),
            },
            selection_range: Range {
                start: at(start_line, start_col),
                end: at(name_end_line, name_end_col),
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from schedule blocks.
///
/// A block is named by its activity; the detail string shows its time range
/// as "HH:MM-HH:MM".
#[allow(deprecated)]
fn extract_block_symbols(
    blocks: &[ScheduleBlock],
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(blocks.len());
    for block in blocks {
        let (start_line, start_col) = positions.offset_to_position(block.span.start);
        let (end_line, end_col) = positions.offset_to_position(block.span.end);
        // For blocks, the "name" is the activity
        let name = block.activity.clone();
        let name_end = block.span.start + name.len();
        let (name_end_line, name_end_col) = positions.offset_to_position(name_end);
        let at = |line: usize, character: usize| Position {
            line: line as u32,
            character: character as u32,
        };
        symbols.push(DocumentSymbol {
            name,
            detail: Some(format!(
                "{:02}:{:02}-{:02}:{:02}",
                block.start.hour, block.start.minute, block.end.hour, block.end.minute
            )),
            kind: SymbolKind::PROPERTY,
            tags: None,
            deprecated: None,
            range: Range {
                start: at(start_line, start_col),
                end: at(end_line, end_col),
            },
            selection_range: Range {
                start: at(start_line, start_col),
                end: at(name_end_line, name_end_col),
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from enum variants (simple string list).
///
/// Variants are plain strings with no span information, so the ranges emitted
/// here are synthetic: variant `i` is placed on line `i`, column 0 — these
/// positions do NOT correspond to the variant's real location in the source.
/// TODO: enhance the parser to track variant spans so real ranges can be used.
#[allow(deprecated)]
fn extract_variant_symbols(
    variants: &[String],
    _positions: &PositionTracker,
) -> Vec<DocumentSymbol> {
    variants
        .iter()
        .enumerate()
        .map(|(i, variant)| DocumentSymbol {
            name: variant.clone(),
            detail: None,
            kind: SymbolKind::ENUM_MEMBER,
            tags: None,
            deprecated: None,
            range: Range {
                start: Position {
                    line: i as u32,
                    character: 0,
                },
                end: Position {
                    line: i as u32,
                    character: variant.len() as u32,
                },
            },
            selection_range: Range {
                start: Position {
                    line: i as u32,
                    character: 0,
                },
                end: Position {
                    line: i as u32,
                    character: variant.len() as u32,
                },
            },
            children: None,
        })
        .collect()
}

623
src/lsp/tests.rs Normal file
View File

@@ -0,0 +1,623 @@
//! Comprehensive test suite for LSP server functionality
use document::Document;
use tower_lsp::lsp_types::*;
use super::*;
// Test data fixtures
const SAMPLE_STORYBOOK: &str = r#"
species Human {
intelligence: high
lifespan: 80
}
enum Mood {
Happy,
Sad,
Angry
}
character Alice: Human {
age: 7
mood: Happy
---backstory
A curious girl who loves adventures
---
}
template Child {
age: 5..12
guardian: Human
}
character Bob: Human from Child {
age: 10
guardian: Alice
}
life_arc Growing {
state child {
on enter {
age: 5
}
}
state teen {
on enter {
age: 13
}
}
state adult {
on enter {
age: 18
}
}
}
schedule DailyRoutine {
08:00 -> 09:00: breakfast {
activity: eating
}
09:00 -> 12:00: school {
activity: learning
}
}
relationship Friendship {
Alice as friend {
bond_strength: 5
}
Bob as friend {
bond_strength: 5
}
}
"#;
#[cfg(test)]
mod document_tests {
    use super::*;

    /// The sample fixture parses cleanly and the original text is retained.
    #[test]
    fn test_document_creation() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        // Dump parse errors to aid debugging when the fixture regresses.
        if !doc.parse_errors.is_empty() {
            eprintln!("Parse errors:");
            for err in &doc.parse_errors {
                eprintln!(" - {}", err.message);
            }
        }
        assert_eq!(doc.text, SAMPLE_STORYBOOK);
        assert!(doc.ast.is_some(), "AST should be parsed");
    }

    /// Invalid syntax yields no AST but does yield parse errors.
    #[test]
    fn test_document_with_errors() {
        let invalid = "character { invalid syntax }";
        let doc = Document::new(invalid.to_string());
        assert!(doc.ast.is_none(), "Invalid syntax should not produce AST");
        assert!(!doc.parse_errors.is_empty(), "Should have parse errors");
    }

    /// Updating a document replaces its text and re-resolves names.
    #[test]
    fn test_document_update() {
        let mut doc = Document::new("character Alice {}".to_string());
        doc.update("character Bob {}".to_string());
        assert_eq!(doc.text, "character Bob {}");
        assert!(doc.name_table.resolve_name("Bob").is_some());
        assert!(doc.name_table.resolve_name("Alice").is_none());
    }

    /// Every top-level declaration in the fixture is resolvable by name.
    #[test]
    fn test_symbol_extraction() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        assert!(doc.name_table.resolve_name("Bob").is_some());
        assert!(doc.name_table.resolve_name("Child").is_some());
        assert!(doc.name_table.resolve_name("Growing").is_some());
        assert!(doc.name_table.resolve_name("DailyRoutine").is_some());
        assert!(doc.name_table.resolve_name("Human").is_some());
        assert!(doc.name_table.resolve_name("Mood").is_some());
        assert!(doc.name_table.resolve_name("Friendship").is_some());
    }

    /// Resolved names carry the declaration kind they came from.
    #[test]
    fn test_symbol_kinds() {
        use crate::resolve::names::DeclKind;
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let alice = doc.name_table.resolve_name("Alice").unwrap();
        assert_eq!(alice.kind, DeclKind::Character);
        let child = doc.name_table.resolve_name("Child").unwrap();
        assert_eq!(child.kind, DeclKind::Template);
        let growing = doc.name_table.resolve_name("Growing").unwrap();
        assert_eq!(growing.kind, DeclKind::LifeArc);
    }

    /// word_at_offset returns the identifier/keyword under an offset, or
    /// None on whitespace.
    #[test]
    fn test_word_at_offset() {
        let doc = Document::new("character Alice {}".to_string());
        // Test finding "character" keyword
        let word = doc.word_at_offset(5);
        assert_eq!(word, Some("character".to_string()));
        // Test finding "Alice" identifier
        let word = doc.word_at_offset(12);
        assert_eq!(word, Some("Alice".to_string()));
        // Test whitespace returns None
        let word = doc.word_at_offset(9);
        assert_eq!(word, None);
    }
}
#[cfg(test)]
mod position_tests {
    use crate::position::PositionTracker;

    /// Offsets on a single line map to (0, offset).
    #[test]
    fn test_position_tracking_single_line() {
        let mut tracker = PositionTracker::new("hello world");
        assert_eq!(tracker.offset_to_position(0), (0, 0));
        assert_eq!(tracker.offset_to_position(6), (0, 6));
        assert_eq!(tracker.offset_to_position(11), (0, 11));
    }

    /// Offsets past each '\n' land on the next line with the column reset.
    #[test]
    fn test_position_tracking_multiline() {
        let mut tracker = PositionTracker::new("line 1\nline 2\nline 3");
        // Start of first line
        assert_eq!(tracker.offset_to_position(0), (0, 0));
        // Start of second line (after \n at offset 6)
        assert_eq!(tracker.offset_to_position(7), (1, 0));
        // Start of third line (after \n at offset 13)
        assert_eq!(tracker.offset_to_position(14), (2, 0));
        // Middle of second line
        assert_eq!(tracker.offset_to_position(10), (1, 3));
    }

    #[test]
    fn test_line_count() {
        let tracker = PositionTracker::new("line 1\nline 2\nline 3");
        assert_eq!(tracker.line_count(), 3);
    }

    /// line_offset gives the byte offset of each line start; out-of-range
    /// line numbers return None.
    #[test]
    fn test_line_offset() {
        let tracker = PositionTracker::new("line 1\nline 2\nline 3");
        assert_eq!(tracker.line_offset(0), Some(0));
        assert_eq!(tracker.line_offset(1), Some(7));
        assert_eq!(tracker.line_offset(2), Some(14));
        assert_eq!(tracker.line_offset(3), None);
    }
}
#[cfg(test)]
mod hover_tests {
    use super::*;

    /// Hovering over DSL keywords yields markdown documentation.
    #[test]
    fn test_hover_keywords() {
        // Test character keyword
        let hover = hover::get_hover_info("character Alice {}", 0, 5);
        assert!(hover.is_some());
        let hover = hover.unwrap();
        if let HoverContents::Markup(content) = hover.contents {
            assert!(content.value.contains("character"));
            assert!(content.value.contains("Defines a character entity"));
        }
        // Test template keyword
        let hover = hover::get_hover_info("template Child {}", 0, 2);
        assert!(hover.is_some());
        // Test life_arc keyword
        let hover = hover::get_hover_info("life_arc Growing {}", 0, 5);
        assert!(hover.is_some());
    }

    /// Identifiers (non-keywords) produce no keyword hover.
    #[test]
    fn test_hover_non_keyword() {
        let hover = hover::get_hover_info("character Alice {}", 0, 12);
        assert!(hover.is_none());
    }

    /// Positions beyond the end of the line produce no hover.
    #[test]
    fn test_hover_invalid_position() {
        let hover = hover::get_hover_info("character Alice {}", 0, 100);
        assert!(hover.is_none());
    }
}
#[cfg(test)]
mod completion_tests {
    use super::*;

    /// Completions mix DSL keywords with entity names declared in the document.
    #[test]
    fn test_keyword_completions() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let params = CompletionParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position {
                    line: 0,
                    character: 0,
                },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: None,
        };
        let result = completion::get_completions(&doc, &params);
        assert!(result.is_some());
        if let Some(CompletionResponse::Array(items)) = result {
            // Should have keyword completions
            assert!(items.iter().any(|item| item.label == "character"));
            assert!(items.iter().any(|item| item.label == "template"));
            assert!(items.iter().any(|item| item.label == "life_arc"));
            // Should have entity completions from document
            assert!(items.iter().any(|item| item.label == "Alice"));
            assert!(items.iter().any(|item| item.label == "Bob"));
        }
    }

    /// Keyword completions come with snippet-format insert text.
    #[test]
    fn test_completion_includes_snippets() {
        let doc = Document::new("".to_string());
        let params = CompletionParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position::default(),
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: None,
        };
        let result = completion::get_completions(&doc, &params);
        if let Some(CompletionResponse::Array(items)) = result {
            // Check that character completion has a snippet
            let character_item = items.iter().find(|item| item.label == "character");
            assert!(character_item.is_some());
            let character_item = character_item.unwrap();
            assert!(character_item.insert_text.is_some());
            assert_eq!(
                character_item.insert_text_format,
                Some(InsertTextFormat::SNIPPET)
            );
        }
    }
}
#[cfg(test)]
mod formatting_tests {
    use super::*;

    #[test]
    fn test_basic_formatting() {
        let doc = Document::new("character Alice{age:7}".to_string());
        let options = FormattingOptions {
            tab_size: 4,
            insert_spaces: true,
            ..Default::default()
        };
        // Unformatted input must produce exactly one whole-document edit.
        let edits = formatting::format_document(&doc, &options)
            .expect("formatter should produce edits for unformatted input");
        assert_eq!(edits.len(), 1);
        let formatted = &edits[0].new_text;
        // NOTE(review): these assertions accept both the spaced and the
        // unspaced form, so they only check the declaration survives
        // formatting — tighten once the expected output is pinned down.
        assert!(formatted.contains("character Alice {") || formatted.contains("character Alice{"));
        assert!(formatted.contains("age: 7") || formatted.contains("age:7"));
    }

    #[test]
    fn test_formatting_indentation() {
        let doc = Document::new("character Alice {\nage: 7\n}".to_string());
        let options = FormattingOptions {
            tab_size: 4,
            insert_spaces: true,
            ..Default::default()
        };
        let result = formatting::format_document(&doc, &options);
        assert!(result.is_some());
        let formatted = &result.unwrap()[0].new_text;
        // Check that age is indented with 4 spaces
        assert!(formatted.contains("    age: 7"));
    }

    #[test]
    fn test_formatting_preserves_prose() {
        let doc = Document::new(
            "character Alice {\n---backstory\nSome irregular spacing\n---\n}".to_string(),
        );
        let options = FormattingOptions::default();
        let result = formatting::format_document(&doc, &options);
        let formatted = &result.unwrap()[0].new_text;
        // Prose content should be preserved exactly
        assert!(formatted.contains("Some irregular spacing"));
    }

    #[test]
    fn test_no_formatting_needed() {
        let already_formatted = "character Alice {\n    age: 7\n}\n";
        let doc = Document::new(already_formatted.to_string());
        let options = FormattingOptions::default();
        let result = formatting::format_document(&doc, &options);
        // Should return None if no changes needed
        assert!(result.is_none());
    }
}
#[cfg(test)]
mod symbols_tests {
    use super::*;

    #[test]
    fn test_extract_symbols_from_ast() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().expect("sample document should parse");
        let mut positions = doc.positions.clone();
        let syms = symbols::extract_symbols_from_ast(ast, &mut positions);
        // Every top-level declaration in the sample should appear.
        for &name in [
            "Alice",
            "Child",
            "Bob",
            "Growing",
            "DailyRoutine",
            "Human",
            "Mood",
            "Friendship",
        ]
        .iter()
        {
            assert!(syms.iter().any(|s| s.name == name), "missing symbol {}", name);
        }
    }

    #[test]
    fn test_symbol_hierarchy() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().expect("sample document should parse");
        let mut positions = doc.positions.clone();
        let syms = symbols::extract_symbols_from_ast(ast, &mut positions);
        // A character's fields show up as child symbols.
        let alice = syms.iter().find(|s| s.name == "Alice").expect("Alice symbol");
        let children = alice.children.as_ref().expect("Alice should have children");
        assert!(children.iter().any(|c| c.name == "age"));
        assert!(children.iter().any(|c| c.name == "backstory"));
    }

    #[test]
    fn test_symbol_kinds() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().expect("sample document should parse");
        let mut positions = doc.positions.clone();
        let syms = symbols::extract_symbols_from_ast(ast, &mut positions);
        // Characters map to CLASS, templates to INTERFACE, enums to ENUM.
        let alice = syms.iter().find(|s| s.name == "Alice").expect("Alice");
        assert_eq!(alice.kind, SymbolKind::CLASS);
        let child = syms.iter().find(|s| s.name == "Child").expect("Child");
        assert_eq!(child.kind, SymbolKind::INTERFACE);
        let mood = syms.iter().find(|s| s.name == "Mood").expect("Mood");
        assert_eq!(mood.kind, SymbolKind::ENUM);
    }

    #[test]
    fn test_life_arc_states() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().expect("sample document should parse");
        let mut positions = doc.positions.clone();
        let syms = symbols::extract_symbols_from_ast(ast, &mut positions);
        // Life arc states appear as children of the arc symbol.
        let growing = syms.iter().find(|s| s.name == "Growing").expect("Growing");
        let states = growing.children.as_ref().expect("Growing should have states");
        for &state in ["child", "teen", "adult"].iter() {
            assert!(states.iter().any(|s| s.name == state), "missing state {}", state);
        }
    }
}
#[cfg(test)]
mod definition_tests {
    use super::*;

    /// Build `GotoDefinitionParams` pointing at the given line/column.
    fn goto_params(line: u32, character: u32) -> GotoDefinitionParams {
        GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position { line, character },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
        }
    }

    #[test]
    fn test_goto_definition_character() {
        let mut doc = Document::new(SAMPLE_STORYBOOK.to_string());
        // Point at the "Alice" identifier in "character Alice".
        let alice_offset = doc.text.find("character Alice").unwrap() + "character ".len();
        let (line, col) = doc.positions.offset_to_position(alice_offset);
        let params = goto_params(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        assert!(definition::get_definition(&mut doc, &params, &uri).is_some());
    }

    #[test]
    fn test_goto_definition_not_found() {
        let mut doc = Document::new("character Alice {}".to_string());
        // Position (0, 0) sits on the "character" keyword, not a symbol.
        let params = goto_params(0, 0);
        let uri = Url::parse("file:///test.sb").unwrap();
        assert!(definition::get_definition(&mut doc, &params, &uri).is_none());
    }
}
#[cfg(test)]
mod references_tests {
    use super::*;

    /// Build `ReferenceParams` (declaration included) at the given position.
    fn reference_params(line: u32, character: u32) -> ReferenceParams {
        ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position { line, character },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        }
    }

    #[test]
    fn test_find_references() {
        let source = "character Alice {}\ncharacter Bob { friend: Alice }";
        let mut doc = Document::new(source.to_string());
        // Point at the first occurrence of "Alice".
        let offset = source.find("Alice").unwrap();
        let (line, col) = doc.positions.offset_to_position(offset);
        let params = reference_params(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        let locations = references::find_references(&mut doc, &params, &uri)
            .expect("references should be found");
        // Both the declaration and the field reference count.
        assert_eq!(locations.len(), 2);
    }

    #[test]
    fn test_find_references_word_boundaries() {
        let source = "character Alice {}\ncharacter Alicia {}";
        let mut doc = Document::new(source.to_string());
        let offset = source.find("Alice").unwrap();
        let (line, col) = doc.positions.offset_to_position(offset);
        let params = reference_params(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        let locations = references::find_references(&mut doc, &params, &uri).unwrap();
        // "Alicia" must not be counted as a match for "Alice".
        assert_eq!(locations.len(), 1);
    }
}
#[cfg(test)]
mod integration_tests {
    use super::*;

    #[test]
    fn test_full_workflow() {
        let mut doc = Document::new(SAMPLE_STORYBOOK.to_string());
        // Parsing succeeded with no errors.
        assert!(doc.ast.is_some());
        assert!(doc.parse_errors.is_empty());
        // Symbols were extracted into the name table.
        assert!(doc.name_table.all_entries().count() > 0);
        // Updating the document re-parses and re-resolves symbols.
        doc.update("character NewChar {}".to_string());
        assert!(doc.name_table.resolve_name("NewChar").is_some());
        assert!(doc.ast.is_some());
    }

    #[test]
    fn test_error_recovery() {
        // A character declaration without a name cannot parse.
        let doc = Document::new("character { invalid }".to_string());
        // Errors are reported instead of panicking.
        assert!(doc.ast.is_none());
        assert!(!doc.parse_errors.is_empty());
        // No symbols survive a failed parse.
        assert_eq!(doc.name_table.all_entries().count(), 0);
    }
}

206
src/lsp/validation_tests.rs Normal file
View File

@@ -0,0 +1,206 @@
//! Tests for semantic validation integration
#[cfg(test)]
mod tests {
    use crate::lsp::document::Document;

    /// Build a `Document` from a source snippet (shared test helper).
    fn parse(source: &str) -> Document {
        Document::new(source.to_string())
    }

    #[test]
    fn test_reserved_keyword_caught_by_parser() {
        // Reserved keywords are rejected by the parser rather than the
        // semantic validator, so they surface as parse errors.
        let doc = parse(
            r#"
character Alice {
self: "Bad field name"
}
"#,
        );
        assert!(
            !doc.parse_errors.is_empty(),
            "Parser should catch reserved keyword 'self' as field name"
        );
    }

    #[test]
    fn test_valid_fields_no_validation_errors() {
        let doc = parse(
            r#"
character Alice {
age: 7
name: "Alice"
}
"#,
        );
        assert!(
            doc.resolve_errors.is_empty(),
            "Valid code should have no validation errors"
        );
    }

    #[test]
    fn test_trait_range_validation() {
        // bond is a trait bounded to [0.0, 1.0]; 1.5 is out of range.
        let doc = parse(
            r#"
character Alice {
bond: 1.5
}
"#,
        );
        assert!(
            !doc.resolve_errors.is_empty(),
            "Should detect bond value out of range [0.0, 1.0]"
        );
        let error_message = format!("{}", doc.resolve_errors[0]);
        assert!(
            error_message.contains("1.5") || error_message.contains("range"),
            "Error should mention the value or range: {}",
            error_message
        );
    }

    #[test]
    fn test_valid_trait_ranges() {
        // Boundary values 0.0 and 1.0 are inclusive.
        let doc = parse(
            r#"
character Alice {
bond: 0.75
trust: 0.0
love: 1.0
}
"#,
        );
        assert!(
            doc.resolve_errors.is_empty(),
            "Valid trait values should produce no errors"
        );
    }

    #[test]
    fn test_life_arc_transition_validation() {
        // 'adult' is never declared as a state, so the transition is invalid.
        let doc = parse(
            r#"
life_arc Growing {
state child {
on birthday -> adult
}
}
"#,
        );
        assert!(
            !doc.resolve_errors.is_empty(),
            "Should detect transition to undefined state"
        );
        let error_message = format!("{}", doc.resolve_errors[0]);
        assert!(
            error_message.contains("adult") || error_message.contains("unknown"),
            "Error should mention unknown state"
        );
    }

    #[test]
    fn test_valid_life_arc_transitions() {
        // Every transition target is a declared state.
        let doc = parse(
            r#"
life_arc Growing {
state child {
on birthday -> teen
}
state teen {
on birthday -> adult
}
state adult {}
}
"#,
        );
        assert!(
            doc.resolve_errors.is_empty(),
            "Valid life arc should produce no errors"
        );
    }

    #[test]
    fn test_schedule_overlap_validation() {
        // The 09:00-11:00 block overlaps the 08:00-10:00 block.
        let doc = parse(
            r#"
schedule Daily {
08:00 -> 10:00: morning {}
09:00 -> 11:00: overlap {}
}
"#,
        );
        assert!(
            !doc.resolve_errors.is_empty(),
            "Should detect overlapping schedule blocks"
        );
        let error_message = format!("{}", doc.resolve_errors[0]);
        assert!(
            error_message.contains("overlap"),
            "Error should mention overlap"
        );
    }

    #[test]
    fn test_valid_schedule_no_overlaps() {
        // Back-to-back blocks sharing an endpoint are not overlaps.
        let doc = parse(
            r#"
schedule Daily {
08:00 -> 10:00: morning {}
10:00 -> 12:00: midday {}
12:00 -> 14:00: afternoon {}
}
"#,
        );
        assert!(
            doc.resolve_errors.is_empty(),
            "Non-overlapping schedule should produce no errors"
        );
    }

    #[test]
    fn test_multiple_validation_errors() {
        // Two independent out-of-range values should both be reported.
        let doc = parse(
            r#"
character Alice {
bond: 2.0
trust: -0.5
}
"#,
        );
        assert!(
            doc.resolve_errors.len() >= 2,
            "Should detect multiple range errors. Got {} errors",
            doc.resolve_errors.len()
        );
    }

    #[test]
    fn test_parse_and_validation_errors_separate() {
        // Validation only runs on a successfully parsed AST, so broken
        // syntax yields parse errors instead of validation errors.
        let doc = parse(
            r#"
character Alice {
character: "Reserved"
invalid syntax here
}
"#,
        );
        assert!(
            !doc.parse_errors.is_empty() || !doc.resolve_errors.is_empty(),
            "Should have either parse or validation errors"
        );
    }
}

118
src/position.rs Normal file
View File

@@ -0,0 +1,118 @@
//! Position tracking utilities for converting byte offsets to line/column
//! positions
//!
//! This module provides efficient conversion between byte offsets and (line,
//! column) positions for LSP support.
use std::collections::HashMap;
/// Position tracker that can quickly convert byte offsets to line/column
/// positions.
///
/// Line starts are precomputed once in [`PositionTracker::new`], so each
/// lookup is a single O(log n) binary search over the line-start table.
/// The previous per-offset `HashMap` cache was removed: it grew without
/// bound over the tracker's lifetime, paid a hash per query against an
/// already-logarithmic search, and forced `&mut self` (and thus clones at
/// call sites) on a logically read-only lookup.
#[derive(Clone)]
pub struct PositionTracker {
    /// Byte offset at which each line starts; `line_starts[0]` is always 0.
    line_starts: Vec<usize>,
}

impl PositionTracker {
    /// Create a new position tracker from source text
    pub fn new(source: &str) -> Self {
        let mut line_starts = vec![0];
        // Record the byte offset just past every newline.
        for (offset, ch) in source.char_indices() {
            if ch == '\n' {
                line_starts.push(offset + 1);
            }
        }
        Self { line_starts }
    }

    /// Convert a byte offset to (line, column) position
    /// Returns (line, col) where both are 0-indexed
    ///
    /// The column is a byte offset within the line (not a character or
    /// UTF-16 count), matching the byte-based input offsets.
    pub fn offset_to_position(&self, offset: usize) -> (usize, usize) {
        // Binary search to find the line
        let line = match self.line_starts.binary_search(&offset) {
            | Ok(line) => line, // Exact match - start of a line
            // Err(i) is the insertion point; the offset belongs to the
            // previous line. i >= 1 always holds because line_starts[0] == 0
            // and offsets are unsigned, so the subtraction cannot underflow.
            | Err(line) => line - 1,
        };
        let col = offset - self.line_starts[line];
        (line, col)
    }

    /// Get the total number of lines
    pub fn line_count(&self) -> usize {
        self.line_starts.len()
    }

    /// Get the byte offset for the start of a line, or `None` if the line
    /// index is out of range
    pub fn line_offset(&self, line: usize) -> Option<usize> {
        self.line_starts.get(line).copied()
    }
}
/// Create a Span with proper line/column information from byte offsets
///
/// Resolves both ends of the byte range through the tracker and packs the
/// results into a [`crate::syntax::ast::Span`].
pub fn create_span_with_position(
    tracker: &mut PositionTracker,
    start: usize,
    end: usize,
) -> crate::syntax::ast::Span {
    use crate::syntax::ast::Span;

    let (start_line, start_col) = tracker.offset_to_position(start);
    let (end_line, end_col) = tracker.offset_to_position(end);
    Span::with_position(start, end, start_line, start_col, end_line, end_col)
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_position_tracking() {
        let mut tracker = PositionTracker::new("line 1\nline 2\nline 3");
        // (byte offset, expected line, expected column), all 0-indexed.
        let cases = [(0, 0, 0), (3, 0, 3), (7, 1, 0), (14, 2, 0)];
        for &(offset, line, col) in cases.iter() {
            assert_eq!(tracker.offset_to_position(offset), (line, col));
        }
    }

    #[test]
    fn test_multiline_unicode() {
        let source = "Hello 世界\nLine 2";
        let mut tracker = PositionTracker::new(source);
        // Start of file.
        assert_eq!(tracker.offset_to_position(0), (0, 0));
        // Column 6 is a byte column: "Hello " is 6 bytes.
        assert_eq!(tracker.offset_to_position(6), (0, 6));
        // First byte after the newline starts line 2.
        let second_line_start = "Hello 世界\n".len();
        assert_eq!(tracker.offset_to_position(second_line_start), (1, 0));
    }
}

View File

@@ -186,6 +186,8 @@ mod tests {
species: None,
fields,
prose_blocks: HashMap::new(),
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}
}

View File

@@ -113,6 +113,8 @@ pub fn convert_character(character: &ast::Character) -> Result<ResolvedCharacter
species: None,
fields,
prose_blocks,
uses_behaviors: character.uses_behaviors.clone(),
uses_schedule: character.uses_schedule.clone(),
span: character.span.clone(),
})
}
@@ -124,6 +126,7 @@ pub fn convert_character(character: &ast::Character) -> Result<ResolvedCharacter
/// 2. Recursively resolving template includes
/// 3. Validating strict mode requirements
/// 4. Applying character's own fields on top
/// 5. Merging behavior and schedule links from templates
pub fn convert_character_with_templates(
character: &ast::Character,
all_files: &[ast::File],
@@ -136,6 +139,45 @@ pub fn convert_character_with_templates(
character.fields.clone()
};
// Collect behavior and schedule links from templates
let (merged_behaviors, merged_schedules) = if let Some(template_names) = &character.template {
let template_behaviors = Vec::new();
let template_schedules = Vec::new();
for template_name in template_names {
// Look up template
let entry = name_table
.lookup(std::slice::from_ref(template_name))
.ok_or_else(|| ResolveError::NameNotFound {
name: template_name.clone(),
suggestion: name_table.find_suggestion(template_name),
})?;
// Get template declaration
if let ast::Declaration::Template(_template) =
&all_files[entry.file_index].declarations[entry.decl_index]
{
// Templates don't have uses_behaviors/uses_schedule yet, but
// they will For now, just pass empty vecs
// TODO: Add template resource linking support
}
}
// Merge using merge functions from merge.rs
let merged_b =
merge::merge_behavior_links(character.uses_behaviors.clone(), template_behaviors);
let merged_s =
merge::merge_schedule_links(character.uses_schedule.clone(), template_schedules);
(merged_b, merged_s)
} else {
// No templates, just use character's own links
(
character.uses_behaviors.clone(),
character.uses_schedule.clone(),
)
};
// Extract fields and prose blocks from merged result
let (fields, prose_blocks) = extract_fields_and_prose(&merged_fields)?;
@@ -144,6 +186,8 @@ pub fn convert_character_with_templates(
species: None,
fields,
prose_blocks,
uses_behaviors: merged_behaviors,
uses_schedule: merged_schedules,
span: character.span.clone(),
})
}
@@ -358,6 +402,8 @@ mod tests {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -394,6 +440,8 @@ mod tests {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
};
@@ -424,6 +472,8 @@ mod tests {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -460,6 +510,8 @@ mod tests {
span: Span::new(0, 10),
}],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
}),
ast::Declaration::Enum(EnumDecl {
@@ -497,6 +549,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(10, 50),
}),
@@ -571,6 +625,8 @@ mod tests {
}],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -583,6 +639,8 @@ mod tests {
span: Span::new(0, 10),
}],
template: Some(vec!["Person".to_string()]),
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
};
@@ -621,6 +679,8 @@ mod tests {
}],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -633,6 +693,8 @@ mod tests {
}],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -652,6 +714,8 @@ mod tests {
},
],
template: Some(vec!["Physical".to_string(), "Mental".to_string()]),
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
};
@@ -688,6 +752,8 @@ mod tests {
}],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -700,6 +766,8 @@ mod tests {
}],
strict: false,
includes: vec!["Human".to_string()],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -736,6 +804,8 @@ mod tests {
span: Span::new(0, 10),
}],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -763,6 +833,8 @@ mod tests {
}],
strict: true,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -771,6 +843,8 @@ mod tests {
species: None,
fields: vec![], // No fields - inherits range from template
template: Some(vec!["Person".to_string()]),
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
};

View File

@@ -105,8 +105,8 @@ fn test_multiple_declarations_end_to_end() {
fn test_relationship_end_to_end() {
let source = r#"
relationship Spousal {
Martha
David
Martha { }
David { }
bond: 0.9
}
"#;
@@ -157,7 +157,7 @@ fn test_life_arc_end_to_end() {
fn test_behavior_tree_end_to_end() {
let source = r#"
behavior WorkAtBakery {
> {
then {
walk
work(duration: 8h)
rest
@@ -172,7 +172,7 @@ fn test_behavior_tree_end_to_end() {
| ResolvedDeclaration::Behavior(b) => {
assert_eq!(b.name, "WorkAtBakery");
// Root should be a Sequence node
assert!(matches!(b.root, BehaviorNode::Sequence(_)));
assert!(matches!(b.root, BehaviorNode::Sequence { .. }));
},
| _ => panic!("Expected Behavior"),
}
@@ -325,8 +325,8 @@ Martha grew up in a small town.
}
relationship Spousal {
Martha
David
Martha { }
David { }
bond: 0.9
}

View File

@@ -92,6 +92,8 @@ fn valid_character() -> impl Strategy<Value = Character> {
species: None,
fields,
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
})
}
@@ -207,6 +209,8 @@ proptest! {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -243,6 +247,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
};
@@ -291,6 +297,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 100),
}));
}
@@ -341,6 +349,8 @@ mod edge_cases {
},
],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};
@@ -368,6 +378,8 @@ mod edge_cases {
span: Span::new(0, 10),
}],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 50),
};

View File

@@ -132,8 +132,8 @@ fn test_all_declaration_kinds() {
name: "Test"
}
relationship R {
C
C
C { }
C { }
}
location Loc {
name: "Place"

View File

@@ -39,8 +39,6 @@ impl RelationshipKey {
#[derive(Debug, Clone)]
struct RelationshipDecl {
relationship: Relationship,
/// Which participant is "self" (index into participants)
self_index: Option<usize>,
}
/// Resolved bidirectional relationship
@@ -57,10 +55,8 @@ pub struct ResolvedRelationship {
pub struct ParticipantFields {
pub participant_name: Vec<String>,
pub role: Option<String>,
/// Fields from this participant's "self" block
pub self_fields: Vec<Field>,
/// Fields from this participant's "other" block (about other participants)
pub other_fields: Vec<Field>,
/// Fields from this participant's block
pub fields: Vec<Field>,
}
/// Resolve bidirectional relationships in a file
@@ -76,18 +72,11 @@ pub fn resolve_relationships(file: &File) -> Result<Vec<ResolvedRelationship>> {
let key = RelationshipKey::new(participant_names, rel.name.clone());
// Determine which participant is "self" based on self/other blocks
let self_index = rel
.participants
.iter()
.position(|p| p.self_block.is_some() || p.other_block.is_some());
relationship_groups
.entry(key)
.or_default()
.push(RelationshipDecl {
relationship: rel.clone(),
self_index,
});
}
}
@@ -122,43 +111,31 @@ fn merge_relationship_declarations(
.map(|p| ParticipantFields {
participant_name: p.name.clone(),
role: p.role.clone(),
self_fields: p.self_block.clone().unwrap_or_default(),
other_fields: p.other_block.clone().unwrap_or_default(),
fields: p.fields.clone(),
})
.collect();
// Merge shared fields (fields outside participant blocks)
let mut merged_fields = base.fields.clone();
// Merge additional declarations
for decl in decls.iter().skip(1) {
// If this declaration specifies a different participant as "self",
// merge their self/other blocks appropriately
if let Some(self_idx) = decl.self_index {
let participant_name = &decl.relationship.participants[self_idx].name;
// Merge participant fields
for participant in &decl.relationship.participants {
// Find this participant in our merged list
if let Some(idx) = participant_fields
if let Some(pf_idx) = participant_fields
.iter()
.position(|pf| &pf.participant_name == participant_name)
.position(|pf| pf.participant_name == participant.name)
{
// Merge self blocks
let self_block = decl.relationship.participants[self_idx]
.self_block
.clone()
.unwrap_or_default();
merge_fields(&mut participant_fields[idx].self_fields, self_block)?;
// Merge other blocks
let other_block = decl.relationship.participants[self_idx]
.other_block
.clone()
.unwrap_or_default();
merge_fields(&mut participant_fields[idx].other_fields, other_block)?;
// Merge fields for this participant
merge_fields(
&mut participant_fields[pf_idx].fields,
participant.fields.clone(),
)?;
}
}
}
// Merge shared fields (fields outside self/other blocks)
let mut merged_fields = base.fields.clone();
for decl in decls.iter().skip(1) {
// Merge shared relationship fields
merge_fields(&mut merged_fields, decl.relationship.fields.clone())?;
}
@@ -209,8 +186,7 @@ mod tests {
Participant {
name: vec![name.to_string()],
role: role.map(|s| s.to_string()),
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
}
}
@@ -257,14 +233,12 @@ mod tests {
}
#[test]
fn test_bidirectional_relationship_merge() {
fn test_relationship_merge() {
let mut martha_participant = make_participant("Martha", Some("spouse"));
martha_participant.self_block = Some(vec![make_field("bond", 90)]);
martha_participant.other_block = Some(vec![make_field("trust", 85)]);
martha_participant.fields = vec![make_field("commitment", 90)];
let mut david_participant = make_participant("David", Some("spouse"));
david_participant.self_block = Some(vec![make_field("bond", 90)]);
david_participant.other_block = Some(vec![make_field("trust", 85)]);
david_participant.fields = vec![make_field("trust", 85)];
let file = File {
declarations: vec![
@@ -297,10 +271,10 @@ mod tests {
#[test]
fn test_conflicting_field_values() {
let mut p1 = make_participant("Alice", None);
p1.self_block = Some(vec![make_field("bond", 80)]);
p1.fields = vec![make_field("bond", 80)];
let mut p2 = make_participant("Alice", None);
p2.self_block = Some(vec![make_field("bond", 90)]); // Different value
p2.fields = vec![make_field("bond", 90)]; // Different value
let file = File {
declarations: vec![

View File

@@ -73,26 +73,21 @@ fn valid_participant(name: String) -> impl Strategy<Value = Participant> {
prop::option::of(valid_ident()).prop_map(move |role| Participant {
name: vec![name.clone()],
role,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
})
}
#[allow(dead_code)]
fn valid_participant_with_blocks(name: String) -> impl Strategy<Value = Participant> {
(
prop::option::of(valid_ident()),
prop::option::of(valid_field_list()),
prop::option::of(valid_field_list()),
)
.prop_map(move |(role, self_block, other_block)| Participant {
(prop::option::of(valid_ident()), valid_field_list()).prop_map(move |(role, fields)| {
Participant {
name: vec![name.clone()],
role,
self_block,
other_block,
fields,
span: Span::new(0, 10),
})
}
})
}
fn valid_relationship() -> impl Strategy<Value = Relationship> {
@@ -142,15 +137,13 @@ fn valid_bidirectional_relationship() -> impl Strategy<Value = (Relationship, Re
let p1 = Participant {
name: vec![p1_name.clone()],
role: None,
self_block: Some(p1_self),
other_block: None,
fields: p1_self,
span: Span::new(0, 10),
};
let p2_in_p1_rel = Participant {
name: vec![p2_name.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
};
@@ -165,15 +158,13 @@ fn valid_bidirectional_relationship() -> impl Strategy<Value = (Relationship, Re
let p2 = Participant {
name: vec![p2_name],
role: None,
self_block: Some(p2_self),
other_block: None,
fields: p2_self,
span: Span::new(20, 30),
};
let p1_in_p2_rel = Participant {
name: vec![p1_name],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
};
@@ -257,15 +248,13 @@ proptest! {
Participant {
name: vec![p1.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
},
Participant {
name: vec![p2.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
},
],
@@ -279,15 +268,13 @@ proptest! {
Participant {
name: vec![p2.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
},
Participant {
name: vec![p1.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
},
],
@@ -327,15 +314,13 @@ proptest! {
Participant {
name: vec![p1.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
},
Participant {
name: vec![p2.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
},
],
@@ -349,15 +334,13 @@ proptest! {
Participant {
name: vec![p1],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
},
Participant {
name: vec![p2],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
},
],
@@ -391,16 +374,14 @@ proptest! {
let participant1 = Participant {
name: vec![p1.clone()],
role: None,
self_block: Some(fields1),
other_block: None,
fields: fields1,
span: Span::new(0, 10),
};
let participant1_again = Participant {
name: vec![p1.clone()],
role: None,
self_block: Some(fields2),
other_block: None,
fields: fields2,
span: Span::new(20, 30),
};
@@ -411,8 +392,7 @@ proptest! {
Participant {
name: vec![p2.clone()],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(0, 10),
},
],
@@ -427,8 +407,7 @@ proptest! {
Participant {
name: vec![p2],
role: None,
self_block: None,
other_block: None,
fields: vec![],
span: Span::new(20, 30),
},
],
@@ -461,6 +440,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})),
valid_ident().prop_map(|name| Declaration::Template(Template {
@@ -468,6 +449,8 @@ proptest! {
fields: vec![],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})),
],

View File

@@ -103,6 +103,84 @@ pub fn resolve_template_includes(
Ok(merged_fields)
}
// ===== Resource Linking Merge =====
/// Merge behavior links from templates into character
///
/// Algorithm (override-by-name semantics from design doc):
/// 1. Start with character's own behavior links (highest priority)
/// 2. For each template (in order), add its behavior links if not already
///    present by tree name
/// 3. Return concatenated list with character links first
///
/// Returns `None` when the merge produces no links at all.
///
/// This implements the merge semantics from
/// resource-linking-checkpoint2-addendum.md
pub fn merge_behavior_links(
    character_links: Option<Vec<crate::syntax::ast::BehaviorLink>>,
    template_links: Vec<Vec<crate::syntax::ast::BehaviorLink>>,
) -> Option<Vec<crate::syntax::ast::BehaviorLink>> {
    use crate::syntax::ast::BehaviorLink;

    // Start with character's own links — they always win.
    let mut result: Vec<BehaviorLink> = character_links.unwrap_or_default();

    // Track which behavior trees are already linked (by tree path).
    let mut seen_trees: HashSet<Vec<String>> =
        result.iter().map(|link| link.tree.clone()).collect();

    // Merge template links in template order. `HashSet::insert` returns
    // false when the tree is already present, i.e. the character (or an
    // earlier template) overrides it.
    for link in template_links.into_iter().flatten() {
        if seen_trees.insert(link.tree.clone()) {
            result.push(link);
        }
    }

    if result.is_empty() {
        None
    } else {
        Some(result)
    }
}
/// Merge schedule links from templates into character
///
/// Algorithm:
/// 1. Start with character's own schedule links
/// 2. For each template (in order), add its schedule links if not already
///    present
/// 3. Return concatenated list with character schedules first
///
/// Returns `None` when the merge produces no schedules at all.
pub fn merge_schedule_links(
    character_schedules: Option<Vec<String>>,
    template_schedules: Vec<Vec<String>>,
) -> Option<Vec<String>> {
    // Start with character's own schedules — they always win.
    let mut result: Vec<String> = character_schedules.unwrap_or_default();

    // Track which schedules are already linked.
    let mut seen_schedules: HashSet<String> = result.iter().cloned().collect();

    // Merge template schedules in template order. `HashSet::insert` returns
    // false when the schedule was already present (character overrides
    // templates, earlier templates override later ones).
    for schedule in template_schedules.into_iter().flatten() {
        if seen_schedules.insert(schedule.clone()) {
            result.push(schedule);
        }
    }

    if result.is_empty() {
        None
    } else {
        Some(result)
    }
}
/// Merge character templates into character fields
///
/// Algorithm:
@@ -499,6 +577,8 @@ mod tests {
fields,
includes: includes.iter().map(|s| s.to_string()).collect(),
strict,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}
}
@@ -513,6 +593,8 @@ mod tests {
} else {
Some(templates.iter().map(|s| s.to_string()).collect())
},
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}
}

View File

@@ -12,6 +12,7 @@ pub mod convert;
pub mod links;
pub mod merge;
pub mod names;
pub mod references;
pub mod types;
pub mod validate;
@@ -43,9 +44,15 @@ mod convert_integration_tests;
use miette::Diagnostic;
pub use names::{
DeclKind,
NameTable,
QualifiedPath,
};
pub use references::{
find_all_references,
Reference,
ReferenceContext,
};
use thiserror::Error;
pub use types::ResolvedFile;

View File

@@ -322,6 +322,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}),
@@ -330,6 +332,8 @@ mod tests {
fields: vec![],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
}),
],
@@ -351,6 +355,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}),
@@ -359,6 +365,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
}),
@@ -377,6 +385,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -408,6 +418,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
}),
@@ -483,6 +495,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -494,6 +508,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -517,6 +533,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -528,6 +546,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
})],
@@ -549,6 +569,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -559,6 +581,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -580,6 +604,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
})],
@@ -590,6 +616,8 @@ mod tests {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(20, 30),
})],

View File

@@ -55,6 +55,8 @@ fn valid_character_decl() -> impl Strategy<Value = (String, Declaration)> {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
});
@@ -69,6 +71,8 @@ fn valid_template_decl() -> impl Strategy<Value = (String, Declaration)> {
fields: vec![],
strict: false,
includes: vec![],
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
});
(name, decl)
@@ -161,6 +165,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(i * 10, i * 10 + 10),
})
@@ -182,6 +188,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}),
@@ -260,6 +268,8 @@ proptest! {
species: None,
fields: vec![],
template: None,
uses_behaviors: None,
uses_schedule: None,
span: Span::new(0, 10),
}),

685
src/resolve/references.rs Normal file
View File

@@ -0,0 +1,685 @@
//! Semantic reference tracking and resolution
//!
//! This module provides semantic analysis to find all references to symbols,
//! enabling features like rename refactoring and find-all-references.
use super::names::{
DeclKind,
NameTable,
};
use crate::syntax::ast::{
Behavior,
Character,
Declaration,
Field,
File,
Institution,
LifeArc,
Location,
Participant,
Relationship,
Schedule,
Span,
Species,
Template,
Value,
};
/// A reference to a symbol in the code
///
/// Produced by [`find_all_references`]; one `Reference` per occurrence,
/// including the definition site itself.
#[derive(Debug, Clone, PartialEq)]
pub struct Reference {
    /// The name being referenced
    pub name: String,
    /// Kind of symbol being referenced
    pub kind: DeclKind,
    /// Location of the reference (just the identifier)
    // NOTE(review): the current producers pass the enclosing declaration's
    // or field's span here, not the identifier's — confirm before relying
    // on this for precise editor highlighting.
    pub span: Span,
    /// Index of the file containing this reference
    pub file_index: usize,
    /// Context of the reference
    pub context: ReferenceContext,
}

/// Context describing where and how a symbol is referenced
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ReferenceContext {
    /// Symbol definition/declaration
    Definition,
    /// Used as a type annotation (e.g., `character Alice: Person`)
    TypeAnnotation,
    /// Used in a field value (e.g., `friend: Alice`)
    FieldValue,
    /// Referenced in a behavior tree (e.g., `@WorkAtBakery`)
    BehaviorReference,
    /// Used in a template include
    TemplateInclude,
    /// Used in a relationship participant
    RelationshipParticipant,
    /// Other/unknown context
    Other,
}
/// Find all references to a specific symbol across all files
///
/// For each file, records the definition site (when the symbol resolves in
/// that file's name table with the requested kind) and then walks the AST
/// to collect every semantic use.
pub fn find_all_references(
    files: &[File],
    symbol_name: &str,
    symbol_kind: DeclKind,
) -> Vec<Reference> {
    let mut found = Vec::new();

    for (file_index, file) in files.iter().enumerate() {
        // A file whose name table cannot be built is skipped entirely.
        let table = match NameTable::from_file(file) {
            | Ok(table) => table,
            | Err(_) => continue,
        };

        // Record the definition site when this file declares the symbol.
        if let Some(entry) = table.resolve_name(symbol_name) {
            if entry.kind == symbol_kind {
                found.push(Reference {
                    name: symbol_name.to_string(),
                    kind: symbol_kind,
                    span: entry.span.clone(),
                    file_index,
                    context: ReferenceContext::Definition,
                });
            }
        }

        // Collect semantic uses from every declaration in the file.
        found.extend(file.declarations.iter().flat_map(|decl| {
            find_references_in_declaration(decl, symbol_name, symbol_kind, file_index)
        }));
    }

    found
}
/// Find references within a single declaration
///
/// Pure dispatch: each declaration kind has its own handler below.
fn find_references_in_declaration(
    decl: &Declaration,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    match decl {
        | Declaration::Character(c) => {
            find_references_in_character(c, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Template(t) => {
            find_references_in_template(t, symbol_name, symbol_kind, file_index)
        },
        | Declaration::LifeArc(l) => {
            find_references_in_life_arc(l, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Schedule(s) => {
            find_references_in_schedule(s, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Behavior(b) => {
            find_references_in_behavior(b, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Institution(i) => {
            find_references_in_institution(i, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Relationship(r) => {
            find_references_in_relationship(r, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Location(l) => {
            find_references_in_location(l, symbol_name, symbol_kind, file_index)
        },
        | Declaration::Species(s) => {
            find_references_in_species(s, symbol_name, symbol_kind, file_index)
        },
        // Enum values may be referenced elsewhere, but the enum declaration
        // itself contains no outgoing references.
        | Declaration::Enum(_) => Vec::new(),
        // Use statements are handled separately.
        | Declaration::Use(_) => Vec::new(),
    }
}
/// Collect references inside a character declaration.
///
/// Looks at the species annotation (`character Alice: Human`), the template
/// list (`character Alice from Person`), and identifiers in field values.
fn find_references_in_character(
    c: &Character,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    let mut out = Vec::new();

    // NOTE(review): spans recorded here are the whole declaration's span,
    // not the referencing identifier's — confirm before using for precise
    // highlighting.

    // `character Alice: Human` — the species annotation.
    if symbol_kind == DeclKind::Species && c.species.as_deref() == Some(symbol_name) {
        out.push(Reference {
            name: symbol_name.to_string(),
            kind: symbol_kind,
            span: c.span.clone(),
            file_index,
            context: ReferenceContext::TypeAnnotation,
        });
    }

    // `character Alice from Person, Baker` — one reference per matching
    // template entry.
    if symbol_kind == DeclKind::Template {
        if let Some(templates) = &c.template {
            for _ in templates.iter().filter(|t| t.as_str() == symbol_name) {
                out.push(Reference {
                    name: symbol_name.to_string(),
                    kind: symbol_kind,
                    span: c.span.clone(),
                    file_index,
                    context: ReferenceContext::TypeAnnotation,
                });
            }
        }
    }

    // Identifiers appearing in field values.
    out.extend(find_references_in_fields(
        &c.fields,
        symbol_name,
        symbol_kind,
        file_index,
    ));

    out
}
/// Collect references inside a template declaration: `include` targets plus
/// identifiers in field values.
fn find_references_in_template(
    t: &Template,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    let mut out = Vec::new();

    // `include Base` — one reference per matching include entry.
    if symbol_kind == DeclKind::Template {
        for _ in t.includes.iter().filter(|i| i.as_str() == symbol_name) {
            out.push(Reference {
                name: symbol_name.to_string(),
                kind: symbol_kind,
                span: t.span.clone(),
                file_index,
                context: ReferenceContext::TemplateInclude,
            });
        }
    }

    out.extend(find_references_in_fields(
        &t.fields,
        symbol_name,
        symbol_kind,
        file_index,
    ));

    out
}
/// Scan a field list for values that reference the symbol.
///
/// Each field's value is searched recursively; nested values inherit the
/// field's span.
fn find_references_in_fields(
    fields: &[Field],
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    fields
        .iter()
        .flat_map(|field| {
            find_references_in_value(
                &field.value,
                symbol_name,
                symbol_kind,
                file_index,
                field.span.clone(),
            )
        })
        .collect()
}
/// Recursively scan one field value for references to the symbol.
///
/// `span` is the span of the enclosing field; nested list elements reuse it.
fn find_references_in_value(
    value: &Value,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
    span: Span,
) -> Vec<Reference> {
    let mut out = Vec::new();

    match value {
        | Value::Identifier(path) => {
            // A bare identifier can name a character, template, enum, or
            // species; only the final path segment is compared.
            let kind_can_match = matches!(
                symbol_kind,
                DeclKind::Character |
                DeclKind::Template |
                DeclKind::Enum |
                DeclKind::Species
            );
            if kind_can_match && path.last().map(String::as_str) == Some(symbol_name) {
                out.push(Reference {
                    name: symbol_name.to_string(),
                    kind: symbol_kind,
                    span,
                    file_index,
                    context: ReferenceContext::FieldValue,
                });
            }
        },
        | Value::List(items) => {
            // Every element inherits the enclosing field's span.
            for item in items {
                out.extend(find_references_in_value(
                    item,
                    symbol_name,
                    symbol_kind,
                    file_index,
                    span.clone(),
                ));
            }
        },
        | Value::Object(fields) => {
            out.extend(find_references_in_fields(
                fields,
                symbol_name,
                symbol_kind,
                file_index,
            ));
        },
        | Value::Override(override_val) => {
            // Override values reference their base template by path.
            if symbol_kind == DeclKind::Template &&
                override_val.base.last().map(String::as_str) == Some(symbol_name)
            {
                out.push(Reference {
                    name: symbol_name.to_string(),
                    kind: symbol_kind,
                    span: override_val.span.clone(),
                    file_index,
                    context: ReferenceContext::FieldValue,
                });
            }
        },
        | _ => {
            // Other value types don't contain references
        },
    }

    out
}
/// Life arcs contain no outgoing symbol references; stub kept for parity
/// with the other declaration handlers.
fn find_references_in_life_arc(
    _l: &LifeArc,
    _symbol_name: &str,
    _symbol_kind: DeclKind,
    _file_index: usize,
) -> Vec<Reference> {
    // Life arcs don't typically reference other symbols
    Vec::new()
}

/// Schedules contribute no references yet.
// NOTE(review): schedule blocks can carry `action:` behavior references —
// confirm whether those should be surfaced here.
fn find_references_in_schedule(
    _s: &Schedule,
    _symbol_name: &str,
    _symbol_kind: DeclKind,
    _file_index: usize,
) -> Vec<Reference> {
    // Schedules don't typically reference other symbols
    Vec::new()
}

/// Behavior trees contribute no references yet.
fn find_references_in_behavior(
    _b: &Behavior,
    _symbol_name: &str,
    _symbol_kind: DeclKind,
    _file_index: usize,
) -> Vec<Reference> {
    // TODO: Parse behavior tree nodes to find @BehaviorName references
    // This requires walking the BehaviorNode tree
    Vec::new()
}

/// Institutions reference symbols only through their field values.
fn find_references_in_institution(
    i: &Institution,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    find_references_in_fields(&i.fields, symbol_name, symbol_kind, file_index)
}
/// Collect references from a relationship: each participant may name a
/// character and may carry its own field block.
fn find_references_in_relationship(
    r: &Relationship,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    r.participants
        .iter()
        .flat_map(|participant| {
            find_references_in_participant(
                participant,
                symbol_name,
                symbol_kind,
                file_index,
            )
        })
        .collect()
}
/// Collect references from one relationship participant: the participant
/// name (which names a character) plus any fields in its block.
fn find_references_in_participant(
    p: &Participant,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    let mut out = Vec::new();

    // Only the final path segment of the participant name is compared.
    let names_character = symbol_kind == DeclKind::Character &&
        p.name.last().map(String::as_str) == Some(symbol_name);
    if names_character {
        out.push(Reference {
            name: symbol_name.to_string(),
            kind: symbol_kind,
            span: p.span.clone(),
            file_index,
            context: ReferenceContext::RelationshipParticipant,
        });
    }

    // The participant's own field block may contain references too.
    out.extend(find_references_in_fields(
        &p.fields,
        symbol_name,
        symbol_kind,
        file_index,
    ));

    out
}
/// Locations reference symbols only through their field values.
fn find_references_in_location(
    l: &Location,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    find_references_in_fields(&l.fields, symbol_name, symbol_kind, file_index)
}

/// Species reference symbols only through their field values.
fn find_references_in_species(
    s: &Species,
    symbol_name: &str,
    symbol_kind: DeclKind,
    file_index: usize,
) -> Vec<Reference> {
    find_references_in_fields(&s.fields, symbol_name, symbol_kind, file_index)
}
#[cfg(test)]
mod tests {
    //! Unit tests for semantic reference finding over parsed single- and
    //! multi-file inputs.

    use super::*;
    use crate::syntax::{
        lexer::Lexer,
        FileParser,
    };

    /// Parse a source snippet into a `File`, panicking on syntax errors.
    fn parse(source: &str) -> File {
        let lexer = Lexer::new(source);
        FileParser::new().parse(lexer).unwrap()
    }

    #[test]
    fn test_find_character_references_in_field() {
        let source = r#"
            character Alice {}
            character Bob { friend: Alice }
        "#;
        let file = parse(source);
        let files = vec![file];

        let refs = find_all_references(&files, "Alice", DeclKind::Character);

        // Should find: definition + field reference
        assert_eq!(refs.len(), 2);

        let definition = refs
            .iter()
            .find(|r| r.context == ReferenceContext::Definition);
        assert!(definition.is_some());

        let field_ref = refs
            .iter()
            .find(|r| r.context == ReferenceContext::FieldValue);
        assert!(field_ref.is_some());
    }

    #[test]
    fn test_find_template_references() {
        let source = r#"
            template Person {}
            character Alice from Person {}
        "#;
        let file = parse(source);
        let files = vec![file];

        let refs = find_all_references(&files, "Person", DeclKind::Template);

        // Should find: definition + type annotation
        assert_eq!(refs.len(), 2);

        let type_ref = refs
            .iter()
            .find(|r| r.context == ReferenceContext::TypeAnnotation);
        assert!(type_ref.is_some());
    }

    #[test]
    fn test_find_species_references() {
        let source = r#"
            species Human {}
            character Alice: Human {}
        "#;
        let file = parse(source);
        let files = vec![file];

        let refs = find_all_references(&files, "Human", DeclKind::Species);

        // Should find: definition + species annotation
        assert_eq!(refs.len(), 2);
    }

    #[test]
    fn test_find_references_across_multiple_files() {
        let file1 = parse("character Alice {}");
        let file2 = parse("character Bob { friend: Alice }");
        let file3 = parse("character Charlie { mentor: Alice }");
        let files = vec![file1, file2, file3];

        let refs = find_all_references(&files, "Alice", DeclKind::Character);

        // Should find: 1 definition + 2 references
        assert_eq!(refs.len(), 3);

        let def = refs
            .iter()
            .filter(|r| r.context == ReferenceContext::Definition)
            .count();
        assert_eq!(def, 1);

        let field_refs = refs
            .iter()
            .filter(|r| r.context == ReferenceContext::FieldValue)
            .count();
        assert_eq!(field_refs, 2);

        // Check file indices: exactly one hit per file
        assert_eq!(refs.iter().filter(|r| r.file_index == 0).count(), 1);
        assert_eq!(refs.iter().filter(|r| r.file_index == 1).count(), 1);
        assert_eq!(refs.iter().filter(|r| r.file_index == 2).count(), 1);
    }

    #[test]
    fn test_respects_symbol_kind() {
        let source = r#"
            character Alice {}
            template Person {}
            character Bob { friend: Alice }
            character Charlie from Person {}
        "#;
        let file = parse(source);
        let files = vec![file];

        // Find character Alice
        let char_refs = find_all_references(&files, "Alice", DeclKind::Character);
        // Should find: character definition + field reference
        assert_eq!(char_refs.len(), 2);

        // Find template Person
        let template_refs = find_all_references(&files, "Person", DeclKind::Template);
        // Should find: template definition + from reference
        assert_eq!(template_refs.len(), 2);
    }

    #[test]
    fn test_template_includes() {
        let source = r#"
            template Base {}
            template Extended { include Base }
        "#;
        let file = parse(source);
        let files = vec![file];

        let refs = find_all_references(&files, "Base", DeclKind::Template);

        // Should find: definition + include reference
        assert_eq!(refs.len(), 2);

        let include_ref = refs
            .iter()
            .find(|r| r.context == ReferenceContext::TemplateInclude);
        assert!(include_ref.is_some());
    }

    #[test]
    fn test_relationship_participants() {
        let source = r#"
            character Alice {}
            character Bob {}
            relationship Friends { Alice as friend {} Bob as friend {} }
        "#;
        let file = parse(source);
        let files = vec![file];

        let alice_refs = find_all_references(&files, "Alice", DeclKind::Character);
        let bob_refs = find_all_references(&files, "Bob", DeclKind::Character);

        // Each should have: definition + relationship participant
        assert_eq!(alice_refs.len(), 2);
        assert_eq!(bob_refs.len(), 2);
    }

    #[test]
    fn test_no_references_found() {
        let source = "character Alice {}";
        let file = parse(source);
        let files = vec![file];

        // Look for non-existent symbol
        let refs = find_all_references(&files, "Bob", DeclKind::Character);

        // Should find nothing
        assert_eq!(refs.len(), 0);
    }

    #[test]
    fn test_enum_field_references() {
        let source = r#"
            enum Mood { Happy, Sad }
            character Alice { mood: Mood }
        "#;
        let file = parse(source);
        let files = vec![file];

        let refs = find_all_references(&files, "Mood", DeclKind::Enum);

        // Should find: definition + field value reference
        assert_eq!(refs.len(), 2);

        let field_ref = refs
            .iter()
            .find(|r| r.context == ReferenceContext::FieldValue);
        assert!(field_ref.is_some());
    }
}

View File

@@ -133,14 +133,9 @@ pub fn validate_relationship_bonds(relationships: &[Relationship], collector: &m
}
}
// Validate self/other blocks if present
// Validate participant fields
for participant in &rel.participants {
if let Some(ref self_fields) = participant.self_block {
validate_trait_ranges(self_fields, collector);
}
if let Some(ref other_fields) = participant.other_block {
validate_trait_ranges(other_fields, collector);
}
validate_trait_ranges(&participant.fields, collector);
}
}
}
@@ -165,7 +160,7 @@ pub fn validate_schedule_overlaps(schedule: &Schedule, collector: &mut ErrorColl
collector.add(ResolveError::ScheduleOverlap {
block1: format!(
"{} ({}:{:02}-{}:{:02})",
block1.activity,
block1.name.as_ref().unwrap_or(&block1.activity),
block1.start.hour,
block1.start.minute,
block1.end.hour,
@@ -173,7 +168,7 @@ pub fn validate_schedule_overlaps(schedule: &Schedule, collector: &mut ErrorColl
),
block2: format!(
"{} ({}:{:02}-{}:{:02})",
block2.activity,
block2.name.as_ref().unwrap_or(&block2.activity),
block2.start.hour,
block2.start.minute,
block2.end.hour,
@@ -242,7 +237,7 @@ fn validate_tree_node_actions(
collector: &mut ErrorCollector,
) {
match node {
| BehaviorNode::Sequence(children) | BehaviorNode::Selector(children) => {
| BehaviorNode::Sequence { children, .. } | BehaviorNode::Selector { children, .. } => {
for child in children {
validate_tree_node_actions(child, action_registry, tree_name, collector);
}
@@ -258,7 +253,7 @@ fn validate_tree_node_actions(
| BehaviorNode::Condition(_) => {
// Conditions are validated separately via expression validation
},
| BehaviorNode::Decorator(_name, child) => {
| BehaviorNode::Decorator { child, .. } => {
validate_tree_node_actions(child, action_registry, tree_name, collector);
},
| BehaviorNode::SubTree(_path) => {
@@ -267,6 +262,154 @@ fn validate_tree_node_actions(
}
}
/// Validate character resource linking
///
/// Checks:
/// 1. No duplicate behavior tree references
/// 2. Priority values are valid (handled by type system)
pub fn validate_character_resource_links(character: &Character, collector: &mut ErrorCollector) {
// Check for duplicate behavior tree references
if let Some(ref behavior_links) = character.uses_behaviors {
let mut seen_trees: HashSet<String> = HashSet::new();
for link in behavior_links {
let tree_name = link.tree.join("::");
if seen_trees.contains(&tree_name) {
collector.add(ResolveError::ValidationError {
message: format!(
"Character '{}' has duplicate behavior tree reference: {}",
character.name,
tree_name
),
help: Some(format!(
"The behavior tree '{}' is referenced multiple times in the uses behaviors list. Each behavior tree should only be referenced once. If you want different conditions or priorities, combine them into a single entry.",
tree_name
)),
});
}
seen_trees.insert(tree_name);
}
}
// Check for duplicate schedule references
if let Some(ref schedules) = character.uses_schedule {
let mut seen_schedules: HashSet<String> = HashSet::new();
for schedule in schedules {
if seen_schedules.contains(schedule) {
collector.add(ResolveError::ValidationError {
message: format!(
"Character '{}' has duplicate schedule reference: {}",
character.name,
schedule
),
help: Some(format!(
"The schedule '{}' is referenced multiple times. Each schedule should only be referenced once.",
schedule
)),
});
}
seen_schedules.insert(schedule.clone());
}
}
}
/// Validate institution resource linking
pub fn validate_institution_resource_links(
institution: &Institution,
collector: &mut ErrorCollector,
) {
// Check for duplicate behavior tree references
if let Some(ref behavior_links) = institution.uses_behaviors {
let mut seen_trees: HashSet<String> = HashSet::new();
for link in behavior_links {
let tree_name = link.tree.join("::");
if seen_trees.contains(&tree_name) {
collector.add(ResolveError::ValidationError {
message: format!(
"Institution '{}' has duplicate behavior tree reference: {}",
institution.name,
tree_name
),
help: Some(format!(
"The behavior tree '{}' is referenced multiple times. Each behavior tree should only be referenced once.",
tree_name
)),
});
}
seen_trees.insert(tree_name);
}
}
// Check for duplicate schedule references
if let Some(ref schedules) = institution.uses_schedule {
let mut seen_schedules: HashSet<String> = HashSet::new();
for schedule in schedules {
if seen_schedules.contains(schedule) {
collector.add(ResolveError::ValidationError {
message: format!(
"Institution '{}' has duplicate schedule reference: {}",
institution.name, schedule
),
help: Some(format!(
"The schedule '{}' is referenced multiple times.",
schedule
)),
});
}
seen_schedules.insert(schedule.clone());
}
}
}
/// Validate schedule composition requirements
///
/// Checks:
/// 1. All blocks in extended schedules have names (the override system
///    addresses blocks by name)
/// 2. Named blocks specify an action reference
pub fn validate_schedule_composition(schedule: &Schedule, collector: &mut ErrorCollector) {
    // If schedule extends another, all blocks must have names for the
    // override system to address them.
    if schedule.extends.is_some() {
        for block in &schedule.blocks {
            if block.name.is_none() && !block.activity.is_empty() {
                collector.add(ResolveError::ValidationError {
                    message: format!(
                        "Schedule '{}' extends another schedule but has unnamed blocks",
                        schedule.name
                    ),
                    help: Some(
                        "When a schedule extends another, all blocks must have names to support the override system. Use 'block name { ... }' syntax instead of 'time -> time : activity { ... }'.".to_string()
                    ),
                });
            }
        }
    }

    // Validate that new-style (named) blocks have action references.
    for block in &schedule.blocks {
        // `if let` binds the name directly, avoiding the `unwrap()` the
        // old `is_some()` check required.
        if let Some(name) = &block.name {
            if block.action.is_none() && block.activity.is_empty() {
                collector.add(ResolveError::ValidationError {
                    message: format!(
                        "Schedule '{}' block '{}' missing action reference",
                        schedule.name,
                        name
                    ),
                    help: Some(
                        "Named blocks should specify a behavior using 'action: BehaviorName'. Example: 'block work { 9:00 -> 17:00, action: WorkBehavior }'".to_string()
                    ),
                });
            }
        }
    }
}
/// Validate an entire file
///
/// Collects all validation errors and returns them together instead of failing
@@ -278,12 +421,17 @@ pub fn validate_file(file: &File, action_registry: &HashSet<String>) -> Result<(
match decl {
| Declaration::Character(c) => {
validate_trait_ranges(&c.fields, &mut collector);
validate_character_resource_links(c, &mut collector);
},
| Declaration::Institution(i) => {
validate_institution_resource_links(i, &mut collector);
},
| Declaration::Relationship(r) => {
validate_relationship_bonds(std::slice::from_ref(r), &mut collector);
},
| Declaration::Schedule(s) => {
validate_schedule_overlaps(s, &mut collector);
validate_schedule_composition(s, &mut collector);
},
| Declaration::LifeArc(la) => {
validate_life_arc_transitions(la, &mut collector);
@@ -472,10 +620,13 @@ mod tests {
let tree = Behavior {
name: "Test".to_string(),
root: BehaviorNode::Sequence(vec![
BehaviorNode::Action("walk".to_string(), vec![]),
BehaviorNode::Action("eat".to_string(), vec![]),
]),
root: BehaviorNode::Sequence {
label: None,
children: vec![
BehaviorNode::Action("walk".to_string(), vec![]),
BehaviorNode::Action("eat".to_string(), vec![]),
],
},
span: Span::new(0, 100),
};

View File

@@ -1,13 +1,52 @@
/// Source location for error reporting with line/column information
///
/// Byte offsets (`start`/`end`) are always meaningful; line/column fields
/// are 0 unless the span was built with [`Span::with_position`].
#[derive(Debug, Clone, PartialEq)]
pub struct Span {
    pub start: usize,
    pub end: usize,
    pub start_line: usize, // 0-indexed line number
    pub start_col: usize,  // 0-indexed column number
    pub end_line: usize,
    pub end_col: usize,
}

impl Span {
    /// Byte-offset-only span; line/column information defaults to 0.
    pub fn new(start: usize, end: usize) -> Self {
        // Delegate so there is a single construction path.
        Self::with_position(start, end, 0, 0, 0, 0)
    }

    /// Span carrying full byte-offset and line/column information.
    pub fn with_position(
        start: usize,
        end: usize,
        start_line: usize,
        start_col: usize,
        end_line: usize,
        end_col: usize,
    ) -> Self {
        Self {
            start,
            end,
            start_line,
            start_col,
            end_line,
            end_col,
        }
    }

    /// Convert to LSP Position for the start
    pub fn start_position(&self) -> (u32, u32) {
        (self.start_line as u32, self.start_col as u32)
    }

    /// Convert to LSP Position for the end
    pub fn end_position(&self) -> (u32, u32) {
        (self.end_line as u32, self.end_col as u32)
    }
}
@@ -48,6 +87,36 @@ pub enum UseKind {
Wildcard, // use foo::*
}
/// Link to a behavior tree with optional conditions and priority
///
/// Produced by `uses behaviors: [...]` on characters, templates, and
/// institutions.
#[derive(Debug, Clone, PartialEq)]
pub struct BehaviorLink {
    pub tree: Vec<String>,       // Qualified path to behavior tree
    pub condition: Option<Expr>, // Optional when clause
    pub priority: Priority,      // Execution priority
    pub span: Span,
}
/// Priority levels for behavior selection
///
/// Variants are declared low-to-high so the derived `Ord` compares by
/// urgency.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Priority {
    Low,
    Normal,
    High,
    Critical,
}

impl Priority {
    /// Parse a lowercase priority keyword; any other input yields `None`.
    pub fn from_str(s: &str) -> Option<Self> {
        let priority = match s {
            | "low" => Priority::Low,
            | "normal" => Priority::Normal,
            | "high" => Priority::High,
            | "critical" => Priority::Critical,
            | _ => return None,
        };
        Some(priority)
    }
}
/// Character definition
#[derive(Debug, Clone, PartialEq)]
pub struct Character {
@@ -55,6 +124,9 @@ pub struct Character {
pub species: Option<String>, // `: Species` - what the character fundamentally is
pub fields: Vec<Field>,
pub template: Option<Vec<String>>, // `from Template1, Template2`
pub uses_behaviors: Option<Vec<BehaviorLink>>, // `uses behaviors: [...]`
pub uses_schedule: Option<Vec<String>>, /* `uses schedule: ScheduleName` or `uses schedules:
* [...]` */
pub span: Span,
}
@@ -65,6 +137,9 @@ pub struct Template {
pub fields: Vec<Field>,
pub strict: bool,
pub includes: Vec<String>,
pub uses_behaviors: Option<Vec<BehaviorLink>>, // `uses behaviors: [...]`
pub uses_schedule: Option<Vec<String>>, /* `uses schedule: ScheduleName` or `uses
* schedules: [...]` */
pub span: Span,
}
@@ -155,23 +230,66 @@ pub struct Transition {
pub span: Span,
}
/// Schedule definition with composition support
///
/// A schedule may `extends` a base schedule; its named blocks can then be
/// overridden by name (see `validate_schedule_composition`).
#[derive(Debug, Clone, PartialEq)]
pub struct Schedule {
    pub name: String,
    pub extends: Option<String>, // Base schedule to extend
    pub blocks: Vec<ScheduleBlock>,
    pub recurrences: Vec<RecurrencePattern>, // Recurring events
    pub fields: Vec<Field>, // Documentation prose blocks, metadata
    pub span: Span,
}
/// A time block in a schedule
#[derive(Debug, Clone, PartialEq)]
pub struct ScheduleBlock {
pub name: Option<String>, // Block name for override system
pub is_override: bool, // Whether this block overrides a base block
pub start: Time,
pub end: Time,
pub activity: String,
pub activity: String, // DEPRECATED: kept for backward compatibility
pub action: Option<Vec<String>>, // Behavior reference (new way)
pub temporal_constraint: Option<TemporalConstraint>, // When this block applies
pub fields: Vec<Field>,
pub span: Span,
}
/// Temporal constraint for when a schedule block applies
#[derive(Debug, Clone, PartialEq)]
pub enum TemporalConstraint {
Season(String), // Applies during specific season (enum value)
DayOfWeek(String), // Applies on specific day of week (enum value)
Month(String), // Applies during specific month (enum value)
DateRange(String, String), // Applies between two dates (TODO: date type)
}
/// Recurring event pattern
#[derive(Debug, Clone, PartialEq)]
pub struct RecurrencePattern {
pub name: String, // Event name (e.g., "MarketDay")
pub constraint: TemporalConstraint, // When it recurs (e.g., "on Earthday")
pub blocks: Vec<ScheduleBlock>, // What happens during the event
pub span: Span,
}
// ===== Parser Helper Types for Schedules =====

/// Helper for parsing schedule bodies with flexible ordering
///
/// The grammar collects these and partitions them into the `fields`,
/// `blocks`, and `recurrences` vectors of a `Schedule`.
#[derive(Debug, Clone, PartialEq)]
pub enum ScheduleBodyItem {
    Field(Field),
    Block(ScheduleBlock),
    Recurrence(RecurrencePattern),
}
/// Helper for parsing schedule block content
#[derive(Debug, Clone, PartialEq)]
pub enum BlockContentItem {
    TimeRange(Time, Time), // `start -> end` range inside a block body
    Field(Field), // Any field; an `action:` field is extracted separately by the grammar
}
/// Behavior tree definition
#[derive(Debug, Clone, PartialEq)]
pub struct Behavior {
@@ -182,19 +300,73 @@ pub struct Behavior {
#[derive(Debug, Clone, PartialEq)]
pub enum BehaviorNode {
Selector(Vec<BehaviorNode>), // ? operator
Sequence(Vec<BehaviorNode>), // > operator (context-dependent)
Selector {
label: Option<String>,
children: Vec<BehaviorNode>,
},
Sequence {
label: Option<String>,
children: Vec<BehaviorNode>,
},
Condition(Expr),
Action(String, Vec<Field>), // Action name + parameters
Decorator(String, Box<BehaviorNode>),
Decorator {
decorator_type: DecoratorType,
child: Box<BehaviorNode>,
},
SubTree(Vec<String>), // Reference to another behavior
}
/// Decorator kinds for behavior tree nodes
#[derive(Debug, Clone, PartialEq)]
pub enum DecoratorType {
    Repeat,                // infinite loop
    RepeatN(u32),          // N times
    RepeatRange(u32, u32), // min..max times
    Invert,
    Retry(u32),       // max attempts
    Timeout(String),  // duration string (e.g., "5s", "30m", "2h")
    Cooldown(String), // duration string (e.g., "5s", "30m", "2h")
    If(Expr),         // conditional decorator (replaces the old `guard` keyword)
    SucceedAlways,
    FailAlways,
}
// BehaviorDuration is used for decorator timeouts/cooldowns (single unit)
// whereas Duration (above) is for general time literals (compound: 2h30m)
#[derive(Debug, Clone, PartialEq)]
pub struct BehaviorDuration {
    pub value: u32,
    pub unit: DurationUnit,
}

#[derive(Debug, Clone, PartialEq)]
pub enum DurationUnit {
    Days,
    Hours,
    Minutes,
    Seconds,
}

impl BehaviorDuration {
    /// Convert this duration to milliseconds.
    ///
    /// Cannot overflow: `u32::MAX` days is ~3.7e17 ms, well within `u64`.
    pub fn to_milliseconds(&self) -> u64 {
        // Number of milliseconds in one unit of `self.unit`.
        let unit_in_ms: u64 = match self.unit {
            | DurationUnit::Seconds => 1000,
            | DurationUnit::Minutes => 60 * 1000,
            | DurationUnit::Hours => 60 * 60 * 1000,
            | DurationUnit::Days => 24 * 60 * 60 * 1000,
        };
        u64::from(self.value) * unit_in_ms
    }
}
/// Institution definition
///
/// Like characters, institutions may link to behavior trees and schedules
/// through `uses` clauses.
#[derive(Debug, Clone, PartialEq)]
pub struct Institution {
    pub name: String,
    pub fields: Vec<Field>,
    pub uses_behaviors: Option<Vec<BehaviorLink>>, // `uses behaviors: [...]`
    pub uses_schedule: Option<Vec<String>>, // `uses schedule: Name` or `uses schedules: [...]`
    pub span: Span,
}
@@ -209,10 +381,9 @@ pub struct Relationship {
#[derive(Debug, Clone, PartialEq)]
pub struct Participant {
pub role: Option<String>, // "as parent"
pub name: Vec<String>, // Qualified path
pub self_block: Option<Vec<Field>>,
pub other_block: Option<Vec<Field>>,
pub role: Option<String>, // "as parent" (optional)
pub fields: Vec<Field>, // Participant-specific fields (required block)
pub span: Span,
}
@@ -284,3 +455,40 @@ pub enum QuantifierKind {
ForAll,
Exists,
}
// ===== Parser Helper Types =====
// These enums are used internally by the LALRPOP parser to handle flexible
// ordering

/// Helper for parsing character/institution bodies with flexible ordering
#[derive(Debug, Clone, PartialEq)]
pub enum CharacterBodyItem {
    Field(Field),
    UsesBehaviors(Vec<BehaviorLink>),
    UsesSchedule(Vec<String>),
}

/// Helper for parsing institution bodies with flexible ordering
///
/// NOTE(review): structurally identical to `CharacterBodyItem`; presumably
/// kept separate so the two grammars can diverge - confirm intent.
#[derive(Debug, Clone, PartialEq)]
pub enum InstitutionBodyItem {
    Field(Field),
    UsesBehaviors(Vec<BehaviorLink>),
    UsesSchedule(Vec<String>),
}

/// Helper for parsing template body items with flexible ordering
#[derive(Debug, Clone, PartialEq)]
pub enum TemplateBodyItem {
    Field(Field),
    Include(String),
    UsesBehaviors(Vec<BehaviorLink>),
    UsesSchedule(Vec<String>),
}

/// Helper for parsing behavior link fields
/// (`tree:`, `when:`, `priority:` inside a `uses behaviors` entry)
#[derive(Debug, Clone, PartialEq)]
pub enum BehaviorLinkField {
    Tree(Vec<String>),
    Condition(Expr),
    Priority(Priority),
}

188
src/syntax/keywords.rs Normal file
View File

@@ -0,0 +1,188 @@
//! Shared keyword definitions derived from the lexer Token enum
//!
//! This module provides structured access to Storybook language keywords
//! without duplicating the keyword strings across different LSP modules.
//! All keywords are defined in the Token enum in lexer.rs using the
//! #[token(...)] attribute.
use crate::syntax::lexer::Token;
/// Top-level declaration keywords that start a new declaration
pub const DECLARATION_KEYWORDS: &[&str] = &[
    "character",
    "template",
    "species",
    "behavior",
    "life_arc",
    "relationship",
    "institution",
    "location",
    "enum",
    "schedule",
];

/// All structural keywords (declarations + use)
///
/// Invariant: exactly `DECLARATION_KEYWORDS` plus `"use"`. Keep the two
/// lists in sync when adding a declaration keyword.
pub const STRUCTURAL_KEYWORDS: &[&str] = &[
    "use",
    "character",
    "template",
    "species",
    "behavior",
    "life_arc",
    "relationship",
    "institution",
    "location",
    "enum",
    "schedule",
];

/// Keywords used in behavior tree definitions
pub const BEHAVIOR_KEYWORDS: &[&str] = &[
    "choose",
    "then",
    "if",
    "when",
    "repeat",
    "invert",
    "retry",
    "timeout",
    "cooldown",
    // "guard" removed - use "if" instead
    "succeed_always",
    "fail_always",
];

/// Modifier keywords used in various contexts
pub const MODIFIER_KEYWORDS: &[&str] = &["strict", "include", "from", "as"];

/// State machine keywords
pub const STATE_KEYWORDS: &[&str] = &["state", "on", "enter"];

/// Expression keywords
pub const EXPRESSION_KEYWORDS: &[&str] =
    &["forall", "exists", "in", "where", "and", "or", "not", "is"];

/// Special identifier keywords
pub const IDENTIFIER_KEYWORDS: &[&str] = &["self", "other"];

/// Operation keywords
pub const OPERATION_KEYWORDS: &[&str] = &["remove", "append"];

/// Boolean literals
pub const BOOLEAN_LITERALS: &[&str] = &["true", "false"];

/// Check if a string is a top-level declaration keyword
pub fn is_declaration_keyword(s: &str) -> bool {
    DECLARATION_KEYWORDS.contains(&s)
}

/// Check if a string is any structural keyword (includes 'use')
pub fn is_structural_keyword(s: &str) -> bool {
    // Delegate to the declaration list so the answer can never drift from
    // DECLARATION_KEYWORDS if a new declaration keyword is added.
    s == "use" || is_declaration_keyword(s)
}

/// Check if a string is a behavior tree keyword
pub fn is_behavior_keyword(s: &str) -> bool {
    BEHAVIOR_KEYWORDS.contains(&s)
}
/// Check if a token is a declaration keyword token
///
/// True exactly for the ten tokens that begin a top-level declaration.
pub fn token_is_declaration_keyword(token: &Token) -> bool {
    match token {
        | Token::Character
        | Token::Template
        | Token::Species
        | Token::Behavior
        | Token::LifeArc
        | Token::Relationship
        | Token::Institution
        | Token::Location
        | Token::Enum
        | Token::Schedule => true,
        | _ => false,
    }
}
/// Check if a token is a structural keyword token (includes Use)
pub fn token_is_structural_keyword(token: &Token) -> bool {
    match token {
        | Token::Use
        | Token::Character
        | Token::Template
        | Token::Species
        | Token::Behavior
        | Token::LifeArc
        | Token::Relationship
        | Token::Institution
        | Token::Location
        | Token::Enum
        | Token::Schedule => true,
        | _ => false,
    }
}
/// Get the string representation of a declaration token
///
/// Returns `None` for any token that is not a declaration keyword.
pub fn declaration_token_to_str(token: &Token) -> Option<&'static str> {
    let name = match token {
        | Token::Character => "character",
        | Token::Template => "template",
        | Token::Species => "species",
        | Token::Behavior => "behavior",
        | Token::LifeArc => "life_arc",
        | Token::Relationship => "relationship",
        | Token::Institution => "institution",
        | Token::Location => "location",
        | Token::Enum => "enum",
        | Token::Schedule => "schedule",
        | _ => return None,
    };
    Some(name)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke tests over the keyword tables and token classifiers.

    #[test]
    fn test_declaration_keywords() {
        assert!(is_declaration_keyword("character"));
        assert!(is_declaration_keyword("template"));
        assert!(is_declaration_keyword("behavior"));
        assert!(!is_declaration_keyword("use"));
        assert!(!is_declaration_keyword("unknown"));
    }

    #[test]
    fn test_structural_keywords() {
        // "use" is structural but not a declaration keyword.
        assert!(is_structural_keyword("character"));
        assert!(is_structural_keyword("use"));
        assert!(!is_structural_keyword("if"));
    }

    #[test]
    fn test_behavior_keywords() {
        assert!(is_behavior_keyword("choose"));
        assert!(is_behavior_keyword("repeat"));
        assert!(!is_behavior_keyword("character"));
    }

    #[test]
    fn test_token_checks() {
        assert!(token_is_declaration_keyword(&Token::Character));
        assert!(token_is_declaration_keyword(&Token::Behavior));
        assert!(!token_is_declaration_keyword(&Token::Use));
        assert!(token_is_structural_keyword(&Token::Use));
        assert!(token_is_structural_keyword(&Token::Character));
        assert!(!token_is_structural_keyword(&Token::If));
    }

    #[test]
    fn test_token_to_str() {
        assert_eq!(
            declaration_token_to_str(&Token::Character),
            Some("character")
        );
        assert_eq!(declaration_token_to_str(&Token::Behavior), Some("behavior"));
        assert_eq!(declaration_token_to_str(&Token::Use), None);
    }
}

View File

@@ -69,11 +69,56 @@ pub enum Token {
From,
#[token("is")]
Is,
#[token("uses")]
Uses,
#[token("behaviors")]
Behaviors,
#[token("schedules")]
Schedules,
#[token("tree")]
Tree,
#[token("priority")]
Priority,
#[token("extends")]
Extends,
#[token("override")]
Override,
#[token("recurrence")]
Recurrence,
#[token("season")]
Season,
#[token("block")]
Block,
#[token("true")]
True,
#[token("false")]
False,
// Behavior tree keywords
#[token("choose")]
Choose,
#[token("then")]
Then,
#[token("if")]
If,
#[token("when")]
When,
#[token("repeat")]
Repeat,
#[token("invert")]
Invert,
#[token("retry")]
Retry,
#[token("timeout")]
Timeout,
#[token("cooldown")]
Cooldown,
// "guard" keyword removed - use "if" instead (Token::If)
#[token("succeed_always")]
SucceedAlways,
#[token("fail_always")]
FailAlways,
// Identifiers and literals
#[regex(r"[a-zA-Z_][a-zA-Z0-9_]*", |lex| lex.slice().to_string())]
Ident(String),

View File

@@ -1,6 +1,7 @@
#![allow(unused_assignments)] // False positives in error enum fields used by thiserror
pub mod ast;
pub mod keywords;
pub mod lexer;
// Parser is generated by LALRPOP
@@ -13,6 +14,12 @@ pub use parser::FileParser;
#[cfg(test)]
mod prop_tests;
#[cfg(test)]
mod resource_linking_tests;
#[cfg(test)]
mod schedule_composition_tests;
use miette::Diagnostic;
use thiserror::Error;

View File

@@ -66,15 +66,44 @@ DottedPath: Vec<String> = {
// ===== Character =====

// character Name[: Species] [from Template, ...] { body }
Character: Character = {
    "character" <name:Ident> <species:(":" <Ident>)?> <template:TemplateClause?> "{" <body:CharacterBody> "}" => {
        Character {
            name,
            species,
            fields: body.0,
            template,
            uses_behaviors: body.1,
            uses_schedule: body.2,
            span: Span::new(0, 0),
        }
    }
};
// Character body can contain fields and uses clauses in any order.
// If a `uses behaviors`/`uses schedule` clause appears more than once, the
// last occurrence wins (earlier ones are silently overwritten).
CharacterBody: (Vec<Field>, Option<Vec<BehaviorLink>>, Option<Vec<String>>) = {
    <items:CharacterBodyItem*> => {
        let mut fields = Vec::new();
        let mut uses_behaviors = None;
        let mut uses_schedule = None;
        for item in items {
            match item {
                CharacterBodyItem::Field(f) => fields.push(f),
                CharacterBodyItem::UsesBehaviors(b) => uses_behaviors = Some(b),
                CharacterBodyItem::UsesSchedule(s) => uses_schedule = Some(s),
            }
        }
        (fields, uses_behaviors, uses_schedule)
    }
};

// One item of a character body: a plain field or a `uses` clause.
CharacterBodyItem: CharacterBodyItem = {
    <Field> => CharacterBodyItem::Field(<>),
    <UsesBehaviorsClause> => CharacterBodyItem::UsesBehaviors(<>),
    <UsesScheduleClause> => CharacterBodyItem::UsesSchedule(<>),
};
TemplateClause: Vec<String> = {
"from" <t:Ident> <rest:("," <Ident>)*> => {
let mut templates = vec![t];
@@ -83,18 +112,116 @@ TemplateClause: Vec<String> = {
}
};
// ===== Template =====
// uses behaviors: [...]
// Bracketed list form used in character/institution bodies; each entry is a
// `{ tree, when, priority }` block (see BehaviorLinkItem).
UsesBehaviorsClause: Vec<BehaviorLink> = {
    "uses" "behaviors" ":" "[" <links:Comma<BehaviorLinkItem>> "]" => links,
};
// Individual behavior link: { tree: BehaviorName, priority: high, when: condition }
// NOTE(review): a link without a `tree:` field panics via `expect` rather than
// reporting a parse error - confirm whether a diagnostic should be raised.
BehaviorLinkItem: BehaviorLink = {
    "{" <fields:BehaviorLinkField+> "}" => {
        let mut tree = None;
        let mut condition = None;
        let mut priority = Priority::Normal;
        // Last occurrence of a repeated field wins.
        for field in fields {
            match field {
                BehaviorLinkField::Tree(t) => tree = Some(t),
                BehaviorLinkField::Condition(c) => condition = Some(c),
                BehaviorLinkField::Priority(p) => priority = p,
            }
        }
        BehaviorLink {
            tree: tree.expect("behavior link must have 'tree' field"),
            condition,
            priority,
            span: Span::new(0, 0),
        }
    }
};

// Fields within a behavior link (trailing commas are optional)
BehaviorLinkField: BehaviorLinkField = {
    "tree" ":" <path:Path> ","? => BehaviorLinkField::Tree(path),
    "when" ":" <expr:Expr> ","? => BehaviorLinkField::Condition(expr),
    "priority" ":" <p:PriorityLevel> ","? => BehaviorLinkField::Priority(p),
};

// Priority level identifiers; anything unrecognized falls back to `normal`.
PriorityLevel: Priority = {
    <s:Ident> => match s.as_str() {
        "low" => Priority::Low,
        "normal" => Priority::Normal,
        "high" => Priority::High,
        "critical" => Priority::Critical,
        _ => Priority::Normal, // Default to normal for invalid values
    },
};

// uses schedule: ScheduleName or uses schedules: [Name1, Name2]
UsesScheduleClause: Vec<String> = {
    "uses" "schedule" ":" <name:Ident> => vec![name],
    "uses" "schedules" ":" "[" <names:Comma<Ident>> "]" => names,
};
// ===== Template =====

// Template declaration; body items may appear in any order, and a later
// `uses` clause overwrites an earlier one.
Template: Template = {
    "template" <name:Ident> <strict:"strict"?> "{" <body:TemplateBodyItem*> "}" => {
        let mut fields = Vec::new();
        let mut includes = Vec::new();
        let mut uses_behaviors = None;
        let mut uses_schedule = None;
        for item in body {
            match item {
                TemplateBodyItem::Field(f) => fields.push(f),
                TemplateBodyItem::Include(inc) => includes.push(inc),
                TemplateBodyItem::UsesBehaviors(b) => uses_behaviors = Some(b),
                TemplateBodyItem::UsesSchedule(s) => uses_schedule = Some(s),
            }
        }
        Template {
            name,
            fields,
            strict: strict.is_some(),
            includes,
            uses_behaviors,
            uses_schedule,
            span: Span::new(0, 0),
        }
    }
};
// Template body items (fields, includes, uses behaviors, uses schedule)
TemplateBodyItem: TemplateBodyItem = {
    <Field> => TemplateBodyItem::Field(<>),
    "include" <name:Ident> => TemplateBodyItem::Include(name),
    <TemplateUsesBehaviorsClause> => TemplateBodyItem::UsesBehaviors(<>),
    <TemplateUsesScheduleClause> => TemplateBodyItem::UsesSchedule(<>),
};

// Template-level behavior links (simple list, no priorities/conditions)
// Each bare name becomes a BehaviorLink with Normal priority and no condition.
TemplateUsesBehaviorsClause: Vec<BehaviorLink> = {
    "uses" "behaviors" ":" <first:Ident> <rest:("," <Ident>)*> => {
        let mut names = vec![first];
        names.extend(rest);
        names.into_iter().map(|name| BehaviorLink {
            tree: vec![name],
            condition: None,
            priority: Priority::Normal,
            span: Span::new(0, 0),
        }).collect()
    },
};

// Template-level schedule links
// NOTE(review): templates accept only the single `uses schedule: Name` form,
// not `uses schedules: [...]` - confirm the omission is intentional.
TemplateUsesScheduleClause: Vec<String> = {
    "uses" "schedule" ":" <name:Ident> => vec![name],
};

// Template/Species include clause
Include: String = {
    "include" <name:Ident> => name
};
@@ -181,6 +308,11 @@ Duration: Duration = {
}
};
// Duration string for decorator timeouts/cooldowns (e.g., "5s", "30m", "2h", "1d")
// Passed through as the raw literal; interpretation happens downstream.
BehaviorDurationLit: String = {
    <s:DurationLit> => s
};

// A free-form prose block
ProseBlock: ProseBlock = {
    ProseBlockToken
};
@@ -233,20 +365,140 @@ Transition: Transition = {
// ===== Schedule =====

Schedule: Schedule = {
    // Simple schedule: schedule Name { ... }
    "schedule" <name:Ident> "{" <body:ScheduleBody> "}" => Schedule {
        name,
        extends: None,
        fields: body.0,
        blocks: body.1,
        recurrences: body.2,
        span: Span::new(0, 0),
    },

    // Extending schedule: schedule Name extends Base { ... }
    "schedule" <name:Ident> "extends" <base:Ident> "{" <body:ScheduleBody> "}" => Schedule {
        name,
        extends: Some(base),
        fields: body.0,
        blocks: body.1,
        recurrences: body.2,
        span: Span::new(0, 0),
    }
};
// Schedule body can contain fields (prose blocks), blocks, and recurrence patterns
// in any order; they are partitioned into the three result vectors.
ScheduleBody: (Vec<Field>, Vec<ScheduleBlock>, Vec<RecurrencePattern>) = {
    <items:ScheduleBodyItem*> => {
        let mut fields = Vec::new();
        let mut blocks = Vec::new();
        let mut recurrences = Vec::new();
        for item in items {
            match item {
                ScheduleBodyItem::Field(f) => fields.push(f),
                ScheduleBodyItem::Block(b) => blocks.push(b),
                ScheduleBodyItem::Recurrence(r) => recurrences.push(r),
            }
        }
        (fields, blocks, recurrences)
    }
};

// One schedule body item
ScheduleBodyItem: ScheduleBodyItem = {
    <Field> => ScheduleBodyItem::Field(<>),
    <ScheduleBlock> => ScheduleBodyItem::Block(<>),
    <RecurrencePattern> => ScheduleBodyItem::Recurrence(<>),
};
// A schedule time block in one of three forms: legacy inline, named `block`,
// or `override` of a base-schedule block of the same name.
ScheduleBlock: ScheduleBlock = {
    // Legacy syntax: time -> time : activity { fields }
    <start:Time> "->" <end:Time> ":" <activity:Ident> "{" <fields:Field*> "}" => ScheduleBlock {
        name: None,
        is_override: false,
        start,
        end,
        activity,
        action: None,
        temporal_constraint: None,
        fields,
        span: Span::new(0, 0),
    },

    // Named block: block name { time, action, fields }
    "block" <name:Ident> "{" <content:BlockContent> "}" => ScheduleBlock {
        name: Some(name),
        is_override: false,
        start: content.0,
        end: content.1,
        activity: String::new(), // Empty for new syntax
        action: content.2,
        temporal_constraint: None,
        fields: content.3,
        span: Span::new(0, 0),
    },

    // Override block: override name { time, action, fields }
    "override" <name:Ident> "{" <content:BlockContent> "}" => ScheduleBlock {
        name: Some(name),
        is_override: true,
        start: content.0,
        end: content.1,
        activity: String::new(), // Empty for new syntax
        action: content.2,
        temporal_constraint: None,
        fields: content.3,
        span: Span::new(0, 0),
    }
};
// Block content: time range, optional action, and fields
// NOTE(review): a block without a `start -> end` range panics via `expect`
// rather than producing a parse error - confirm this is acceptable.
BlockContent: (Time, Time, Option<Vec<String>>, Vec<Field>) = {
    <items:BlockContentItem+> => {
        let mut start = None;
        let mut end = None;
        let mut action = None;
        let mut fields = Vec::new();
        for item in items {
            match item {
                BlockContentItem::TimeRange(s, e) => {
                    start = Some(s);
                    end = Some(e);
                }
                BlockContentItem::Field(f) => {
                    if f.name == "action" {
                        // Extract action as qualified path from identifier value
                        // NOTE(review): an `action:` field whose value is not an
                        // identifier is silently dropped - confirm intent.
                        if let Value::Identifier(path) = &f.value {
                            action = Some(path.clone());
                        }
                    } else {
                        fields.push(f);
                    }
                }
            }
        }
        (
            start.expect("block must have time range"),
            end.expect("block must have time range"),
            action,
            fields
        )
    }
};

// One item inside a block body: a time range (optional trailing comma) or a field.
BlockContentItem: BlockContentItem = {
    <start:Time> "->" <end:Time> ","? => BlockContentItem::TimeRange(start, end),
    <Field> => BlockContentItem::Field(<>),
};
// Recurrence pattern: recurrence Name on DayOfWeek { blocks }
// Only the day-of-week constraint form is accepted by the grammar.
RecurrencePattern: RecurrencePattern = {
    "recurrence" <name:Ident> "on" <day:Ident> "{" <blocks:ScheduleBlock+> "}" => RecurrencePattern {
        name,
        constraint: TemporalConstraint::DayOfWeek(day),
        blocks,
        span: Span::new(0, 0),
    }
};
@@ -263,23 +515,116 @@ Behavior: Behavior = {
// Any behavior tree node. Repeat is handled as a decorator (DecoratorNode),
// not as a standalone node kind.
BehaviorNode: BehaviorNode = {
    <SelectorNode>,
    <SequenceNode>,
    <ConditionNode>,
    <DecoratorNode>,
    <ActionNode>,
    <SubTreeNode>,
};
// Selector node: choose { ... } or choose label { ... }
SelectorNode: BehaviorNode = {
    "choose" <label:Ident?> "{" <children:BehaviorNode+> "}" => BehaviorNode::Selector {
        label,
        children,
    },
};
// Sequence node: then { ... } or then label { ... }
SequenceNode: BehaviorNode = {
    "then" <label:Ident?> "{" <children:BehaviorNode+> "}" => BehaviorNode::Sequence {
        label,
        children,
    },
};
// Condition node: if(expr) or when(expr)
// if(expr) { child } is the decorator form (replaces old "guard" keyword)
ConditionNode: BehaviorNode = {
    "if" "(" <condition:Expr> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::If(condition),
        child: Box::new(child),
    },
    "if" "(" <condition:Expr> ")" => BehaviorNode::Condition(condition),
    "when" "(" <condition:Expr> ")" => BehaviorNode::Condition(condition),
};
// Decorator node: keyword [params] { child }
// Every decorator wraps exactly one child node.
// NOTE(review): IntLit is presumably i64; `as u32` silently truncates
// negative/oversized repeat/retry counts - confirm validation elsewhere.
DecoratorNode: BehaviorNode = {
    <DecoratorRepeat>,
    <DecoratorRepeatN>,
    <DecoratorRepeatRange>,
    <DecoratorInvert>,
    <DecoratorRetry>,
    <DecoratorTimeout>,
    <DecoratorCooldown>,
    <DecoratorSucceedAlways>,
    <DecoratorFailAlways>,
};

// repeat { child } - loop forever
DecoratorRepeat: BehaviorNode = {
    "repeat" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::Repeat,
        child: Box::new(child),
    },
};

// repeat(N) { child } - run N times
DecoratorRepeatN: BehaviorNode = {
    "repeat" "(" <n:IntLit> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::RepeatN(n as u32),
        child: Box::new(child),
    },
};

// repeat(min..max) { child }
DecoratorRepeatRange: BehaviorNode = {
    "repeat" "(" <min:IntLit> ".." <max:IntLit> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::RepeatRange(min as u32, max as u32),
        child: Box::new(child),
    },
};

// invert { child }
DecoratorInvert: BehaviorNode = {
    "invert" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::Invert,
        child: Box::new(child),
    },
};

// retry(N) { child }
DecoratorRetry: BehaviorNode = {
    "retry" "(" <n:IntLit> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::Retry(n as u32),
        child: Box::new(child),
    },
};

// timeout("5s") { child } - duration kept as a raw literal string
DecoratorTimeout: BehaviorNode = {
    "timeout" "(" <duration:BehaviorDurationLit> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::Timeout(duration),
        child: Box::new(child),
    },
};

// cooldown("30m") { child } - duration kept as a raw literal string
DecoratorCooldown: BehaviorNode = {
    "cooldown" "(" <duration:BehaviorDurationLit> ")" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::Cooldown(duration),
        child: Box::new(child),
    },
};

// succeed_always { child }
DecoratorSucceedAlways: BehaviorNode = {
    "succeed_always" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::SucceedAlways,
        child: Box::new(child),
    },
};

// fail_always { child }
DecoratorFailAlways: BehaviorNode = {
    "fail_always" "{" <child:BehaviorNode> "}" => BehaviorNode::Decorator {
        decorator_type: DecoratorType::FailAlways,
        child: Box::new(child),
    },
};
// Action node: action_name or action_name(params)
ActionNode: BehaviorNode = {
<name:Ident> "(" <params:Comma<ActionParam>> ")" => BehaviorNode::Action(name, params),
<name:Ident> => BehaviorNode::Action(name, vec![]),
@@ -300,20 +645,50 @@ ActionParam: Field = {
},
};
// Subtree node: include path::to::subtree
SubTreeNode: BehaviorNode = {
    "include" <path:Path> => BehaviorNode::SubTree(path),
};
// ===== Institution =====

// institution Name { body } - fields plus optional uses clauses
Institution: Institution = {
    "institution" <name:Ident> "{" <body:InstitutionBody> "}" => {
        Institution {
            name,
            fields: body.0,
            uses_behaviors: body.1,
            uses_schedule: body.2,
            span: Span::new(0, 0),
        }
    }
};
// Institution body can contain fields and uses clauses in any order.
// A repeated `uses` clause overwrites the earlier one.
InstitutionBody: (Vec<Field>, Option<Vec<BehaviorLink>>, Option<Vec<String>>) = {
    <items:InstitutionBodyItem*> => {
        let mut fields = Vec::new();
        let mut uses_behaviors = None;
        let mut uses_schedule = None;
        for item in items {
            match item {
                InstitutionBodyItem::Field(f) => fields.push(f),
                InstitutionBodyItem::UsesBehaviors(b) => uses_behaviors = Some(b),
                InstitutionBodyItem::UsesSchedule(s) => uses_schedule = Some(s),
            }
        }
        (fields, uses_behaviors, uses_schedule)
    }
};

// One institution body item
InstitutionBodyItem: InstitutionBodyItem = {
    <Field> => InstitutionBodyItem::Field(<>),
    <UsesBehaviorsClause> => InstitutionBodyItem::UsesBehaviors(<>),
    <UsesScheduleClause> => InstitutionBodyItem::UsesSchedule(<>),
};
// ===== Relationship =====
Relationship: Relationship = {
@@ -326,40 +701,22 @@ Relationship: Relationship = {
};
// A relationship participant: qualified name, optional `as role`, and a
// required `{ ... }` field block. The old self/other perspective blocks
// were removed in v0.2.0.
Participant: Participant = {
    // Participant with role and block (block required)
    <name:Path> "as" <role:Ident> "{" <fields:Field*> "}" => Participant {
        name,
        role: Some(role),
        fields,
        span: Span::new(0, 0),
    },

    // Participant without role (block still required)
    <name:Path> "{" <fields:Field*> "}" => Participant {
        name,
        role: None,
        fields,
        span: Span::new(0, 0),
    },
};
// ===== Location =====
Location: Location = {
@@ -540,9 +897,32 @@ extern {
"include" => Token::Include,
"from" => Token::From,
"is" => Token::Is,
"uses" => Token::Uses,
"behaviors" => Token::Behaviors,
"schedules" => Token::Schedules,
"tree" => Token::Tree,
"priority" => Token::Priority,
"extends" => Token::Extends,
"override" => Token::Override,
"recurrence" => Token::Recurrence,
"season" => Token::Season,
"block" => Token::Block,
"true" => Token::True,
"false" => Token::False,
// Behavior tree keywords
"choose" => Token::Choose,
"then" => Token::Then,
"if" => Token::If,
"when" => Token::When,
"repeat" => Token::Repeat,
"invert" => Token::Invert,
"retry" => Token::Retry,
"timeout" => Token::Timeout,
"cooldown" => Token::Cooldown,
"succeed_always" => Token::SucceedAlways,
"fail_always" => Token::FailAlways,
// Literals
Ident => Token::Ident(<String>),
IntLit => Token::IntLit(<i64>),

File diff suppressed because it is too large Load Diff

View File

@@ -44,7 +44,20 @@ fn valid_ident() -> impl Strategy<Value = String> {
"not" |
"is" |
"true" |
"false"
"false" |
// Behavior tree keywords
"if" |
"when" |
"choose" |
"then" |
"include" |
"repeat" |
"invert" |
"retry" |
"timeout" |
"cooldown" |
"succeed_always" |
"fail_always"
)
})
}
@@ -167,7 +180,8 @@ proptest! {
fn test_keywords_are_distinct_from_idents(
keyword in prop::sample::select(vec![
"character", "template", "enum", "use", "self", "other",
"and", "or", "not", "is", "true", "false"
"and", "or", "not", "is", "true", "false",
"if", "when", "choose", "then", "include"
])
) {
let lexer = Lexer::new(keyword);
@@ -305,7 +319,7 @@ fn valid_relationship() -> impl Strategy<Value = String> {
.collect::<Vec<_>>()
.join("\n");
format!(
"relationship {} {{\n {}\n {}\n{}\n}}",
"relationship {} {{\n {} {{ }}\n {} {{ }}\n{}\n}}",
name, person1, person2, fields_str
)
})
@@ -600,13 +614,13 @@ fn valid_behavior_node_depth(depth: u32) -> BoxedStrategy<String> {
// Base case: just actions or subtrees
prop_oneof![
valid_action_node(),
valid_ident().prop_map(|name| format!("@{}", name)),
valid_ident().prop_map(|name| format!("include {}", name)),
]
.boxed()
} else {
// Recursive case: can be action, subtree, selector, or sequence
let action = valid_action_node();
let subtree = valid_ident().prop_map(|name| format!("@{}", name));
let subtree = valid_ident().prop_map(|name| format!("include {}", name));
let selector = prop::collection::vec(valid_behavior_node_depth(depth - 1), 1..3).prop_map(
|children| {
@@ -615,7 +629,7 @@ fn valid_behavior_node_depth(depth: u32) -> BoxedStrategy<String> {
.map(|c| format!(" {}", c))
.collect::<Vec<_>>()
.join("\n");
format!("? {{\n{}\n }}", children_str)
format!("choose {{\n{}\n }}", children_str)
},
);
@@ -626,7 +640,7 @@ fn valid_behavior_node_depth(depth: u32) -> BoxedStrategy<String> {
.map(|c| format!(" {}", c))
.collect::<Vec<_>>()
.join("\n");
format!("> {{\n{}\n }}", children_str)
format!("then {{\n{}\n }}", children_str)
},
);
@@ -706,7 +720,7 @@ proptest! {
#[test]
fn test_behavior_tree_deeply_nested(name in valid_ident()) {
let input = format!(
"behavior {} {{\n > {{\n ? {{\n > {{\n action\n }}\n }}\n }}\n}}",
"behavior {} {{\n then {{\n choose {{\n then {{\n action\n }}\n }}\n }}\n}}",
name
);
let lexer = Lexer::new(&input);
@@ -738,7 +752,7 @@ proptest! {
subtree_path in prop::collection::vec(valid_ident(), 1..3)
) {
let path = subtree_path.join("::");
let input = format!("behavior {} {{\n @{}\n}}", name, path);
let input = format!("behavior {} {{\n include {}\n}}", name, path);
let lexer = Lexer::new(&input);
let parser = FileParser::new();
let result = parser.parse(lexer);
@@ -754,7 +768,7 @@ proptest! {
.map(|c| format!(" {}", c))
.collect::<Vec<_>>()
.join("\n");
let input = format!("behavior {} {{\n ? {{\n{}\n }}\n}}", name, children_str);
let input = format!("behavior {} {{\n choose {{\n{}\n }}\n}}", name, children_str);
let lexer = Lexer::new(&input);
let parser = FileParser::new();
let result = parser.parse(lexer);
@@ -770,7 +784,7 @@ proptest! {
.map(|c| format!(" {}", c))
.collect::<Vec<_>>()
.join("\n");
let input = format!("behavior {} {{\n > {{\n{}\n }}\n}}", name, children_str);
let input = format!("behavior {} {{\n then {{\n{}\n }}\n}}", name, children_str);
let lexer = Lexer::new(&input);
let parser = FileParser::new();
let result = parser.parse(lexer);

View File

@@ -0,0 +1,367 @@
//! Tests for resource linking syntax (uses behaviors/schedules)
use crate::syntax::{
ast::*,
lexer::Lexer,
FileParser,
};
#[test]
fn test_character_with_single_behavior_link() {
let input = r#"
character Martha: Human {
age: 42
uses behaviors: [
{ tree: BakeryWork, priority: normal }
]
}
"#;
let lexer = Lexer::new(input);
let parser = FileParser::new();
let result = parser.parse(lexer);
assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
let file = result.unwrap();
assert_eq!(file.declarations.len(), 1);
match &file.declarations[0] {
| Declaration::Character(c) => {
assert_eq!(c.name, "Martha");
assert_eq!(c.species.as_ref().unwrap(), "Human");
// Check behavior links
assert!(c.uses_behaviors.is_some());
let behaviors = c.uses_behaviors.as_ref().unwrap();
assert_eq!(behaviors.len(), 1);
assert_eq!(behaviors[0].tree, vec!["BakeryWork"]);
assert_eq!(behaviors[0].priority, Priority::Normal);
assert!(behaviors[0].condition.is_none());
},
| _ => panic!("Expected Character declaration"),
}
}
#[test]
fn test_character_with_multiple_behavior_links() {
let input = r#"
character Martha: Human {
uses behaviors: [
{ tree: HandleUrgentNeeds, priority: critical },
{ tree: BakeryWork, priority: normal, when: time.hour >= 5 and time.hour < 13 },
{ tree: Idle, priority: low }
]
}
"#;
let lexer = Lexer::new(input);
let parser = FileParser::new();
let result = parser.parse(lexer);
assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
let file = result.unwrap();
match &file.declarations[0] {
| Declaration::Character(c) => {
let behaviors = c.uses_behaviors.as_ref().unwrap();
assert_eq!(behaviors.len(), 3);
assert_eq!(behaviors[0].tree, vec!["HandleUrgentNeeds"]);
assert_eq!(behaviors[0].priority, Priority::Critical);
assert_eq!(behaviors[1].tree, vec!["BakeryWork"]);
assert_eq!(behaviors[1].priority, Priority::Normal);
assert!(behaviors[1].condition.is_some()); // Has when clause
assert_eq!(behaviors[2].tree, vec!["Idle"]);
assert_eq!(behaviors[2].priority, Priority::Low);
},
| _ => panic!("Expected Character declaration"),
}
}
#[test]
fn test_character_with_schedule_link() {
let input = r#"
character Martha: Human {
uses schedule: BakerSchedule
}
"#;
let lexer = Lexer::new(input);
let parser = FileParser::new();
let result = parser.parse(lexer);
assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
let file = result.unwrap();
match &file.declarations[0] {
| Declaration::Character(c) => {
assert!(c.uses_schedule.is_some());
let schedules = c.uses_schedule.as_ref().unwrap();
assert_eq!(schedules.len(), 1);
assert_eq!(schedules[0], "BakerSchedule");
},
| _ => panic!("Expected Character declaration"),
}
}
#[test]
fn test_character_with_multiple_schedules() {
let input = r#"
character Martha: Human {
uses schedules: [WorkdaySchedule, WeekendSchedule]
}
"#;
let lexer = Lexer::new(input);
let parser = FileParser::new();
let result = parser.parse(lexer);
assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
let file = result.unwrap();
match &file.declarations[0] {
| Declaration::Character(c) => {
let schedules = c.uses_schedule.as_ref().unwrap();
assert_eq!(schedules.len(), 2);
assert_eq!(schedules[0], "WorkdaySchedule");
assert_eq!(schedules[1], "WeekendSchedule");
},
| _ => panic!("Expected Character declaration"),
}
}
#[test]
fn test_character_with_behaviors_and_schedule() {
    // Plain fields, behavior links, and a schedule link may be interleaved
    // freely inside a single character body.
    let src = r#"
        character Martha: Human {
            age: 42
            uses behaviors: [
                { tree: BakeryWork, priority: normal }
            ]
            profession: "Baker"
            uses schedule: BakerSchedule
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Character(c) = &file.declarations[0] {
        // Plain fields: age and profession.
        assert_eq!(c.fields.len(), 2);
        // Behavior links.
        assert!(c.uses_behaviors.is_some());
        assert_eq!(c.uses_behaviors.as_ref().unwrap().len(), 1);
        // Schedule link.
        assert!(c.uses_schedule.is_some());
        assert_eq!(c.uses_schedule.as_ref().unwrap().len(), 1);
    } else {
        panic!("Expected Character declaration");
    }
}
#[test]
fn test_institution_with_behavior_links() {
    // Institutions accept the same `uses behaviors:` link syntax as characters.
    let src = r#"
        institution Bakery {
            type: "Business"
            uses behaviors: [
                { tree: BakeryOperations, priority: high }
            ]
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Institution(inst) = &file.declarations[0] {
        assert_eq!(inst.name, "Bakery");
        assert!(inst.uses_behaviors.is_some());
        let links = inst.uses_behaviors.as_ref().unwrap();
        assert_eq!(links.len(), 1);
        assert_eq!(links[0].tree, vec!["BakeryOperations"]);
        assert_eq!(links[0].priority, Priority::High);
    } else {
        panic!("Expected Institution declaration");
    }
}
#[test]
fn test_qualified_path_behavior_tree() {
    // A behavior tree reference may be a `::`-qualified path; the parser
    // should split it into its individual path segments.
    let input = r#"
        character Martha: Human {
            uses behaviors: [
                { tree: village::baker::BakeryWork, priority: normal }
            ]
        }
    "#;
    let lexer = Lexer::new(input);
    let parser = FileParser::new();
    let result = parser.parse(lexer);
    assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
    let file = result.unwrap();
    match &file.declarations[0] {
        | Declaration::Character(c) => {
            // Guard presence and count first so a parse that drops the link
            // fails with a clear assertion instead of an index panic.
            assert!(c.uses_behaviors.is_some());
            let behaviors = c.uses_behaviors.as_ref().unwrap();
            assert_eq!(behaviors.len(), 1);
            // Each `::` segment becomes one path element.
            assert_eq!(behaviors[0].tree, vec!["village", "baker", "BakeryWork"]);
        },
        | _ => panic!("Expected Character declaration"),
    }
}
#[test]
fn test_all_priority_levels() {
    // Every priority keyword (low/normal/high/critical) maps to its
    // corresponding `Priority` variant.
    let input = r#"
        character Test: Human {
            uses behaviors: [
                { tree: A, priority: low },
                { tree: B, priority: normal },
                { tree: C, priority: high },
                { tree: D, priority: critical }
            ]
        }
    "#;
    let lexer = Lexer::new(input);
    let parser = FileParser::new();
    let result = parser.parse(lexer);
    assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
    let file = result.unwrap();
    match &file.declarations[0] {
        | Declaration::Character(c) => {
            assert!(c.uses_behaviors.is_some());
            let behaviors = c.uses_behaviors.as_ref().unwrap();
            // Assert the count first so a short list fails with a clear
            // message instead of an index panic on `behaviors[3]` below.
            assert_eq!(behaviors.len(), 4);
            assert_eq!(behaviors[0].priority, Priority::Low);
            assert_eq!(behaviors[1].priority, Priority::Normal);
            assert_eq!(behaviors[2].priority, Priority::High);
            assert_eq!(behaviors[3].priority, Priority::Critical);
        },
        | _ => panic!("Expected Character declaration"),
    }
}
#[test]
fn test_template_with_simple_behavior_links() {
    // The shorthand `uses behaviors: A, B` form defaults every link to
    // normal priority.
    let src = r#"
        template Baker {
            uses behaviors: BakingSkills, CustomerService
            specialty: "bread"
            baking_skill: 0.0..1.0
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Template(t) = &file.declarations[0] {
        assert_eq!(t.name, "Baker");
        // Behavior links.
        assert!(t.uses_behaviors.is_some());
        let links = t.uses_behaviors.as_ref().unwrap();
        assert_eq!(links.len(), 2);
        assert_eq!(links[0].tree, vec!["BakingSkills"]);
        assert_eq!(links[0].priority, Priority::Normal);
        assert_eq!(links[1].tree, vec!["CustomerService"]);
        assert_eq!(links[1].priority, Priority::Normal);
    } else {
        panic!("Expected Template declaration");
    }
}
#[test]
fn test_template_with_schedule_link() {
    // Templates support the same single-schedule link syntax as characters.
    let src = r#"
        template Baker {
            uses schedule: BakerSchedule
            specialty: "bread"
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Template(t) = &file.declarations[0] {
        assert_eq!(t.name, "Baker");
        // Schedule link.
        assert!(t.uses_schedule.is_some());
        let linked = t.uses_schedule.as_ref().unwrap();
        assert_eq!(linked.len(), 1);
        assert_eq!(linked[0], "BakerSchedule");
    } else {
        panic!("Expected Template declaration");
    }
}
#[test]
fn test_template_with_behaviors_and_schedule() {
    // `include`, behavior links, schedule link, and fields can all coexist
    // in one template body.
    let src = r#"
        template Baker {
            include Person
            uses behaviors: BakingSkills, CustomerService
            uses schedule: BakerSchedule
            specialty: "bread"
            baking_skill: 0.0..1.0
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Template(t) = &file.declarations[0] {
        assert_eq!(t.name, "Baker");
        // Include list.
        assert_eq!(t.includes.len(), 1);
        assert_eq!(t.includes[0], "Person");
        // Behavior links.
        assert!(t.uses_behaviors.is_some());
        assert_eq!(t.uses_behaviors.as_ref().unwrap().len(), 2);
        // Schedule link.
        assert!(t.uses_schedule.is_some());
        assert_eq!(t.uses_schedule.as_ref().unwrap().len(), 1);
    } else {
        panic!("Expected Template declaration");
    }
}

View File

@@ -0,0 +1,324 @@
//! Tests for year-long composable schedule system
use crate::syntax::{
ast::*,
lexer::Lexer,
FileParser,
};
#[test]
fn test_simple_schedule_backward_compat() {
    // The pre-0.2.0 anonymous `HH:MM -> HH:MM : activity {}` form still parses.
    let src = r#"
        schedule DailyRoutine {
            09:00 -> 17:00 : work {
                place: "Office"
            }
            18:00 -> 19:00 : dinner {}
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    assert_eq!(file.declarations.len(), 1);
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.name, "DailyRoutine");
        assert!(s.extends.is_none());
        assert_eq!(s.blocks.len(), 2);
        assert_eq!(s.recurrences.len(), 0);
        // Anonymous blocks keep their activity label but carry no name
        // and no override flag.
        assert_eq!(s.blocks[0].activity, "work");
        assert!(s.blocks[0].name.is_none());
        assert!(!s.blocks[0].is_override);
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_schedule_extends() {
    // `extends` records the parent schedule's name on the declaration.
    let src = r#"
        schedule BakerSchedule extends BaseWorkday {
            block work { 05:00 -> 13:00, action: BakingWork }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.name, "BakerSchedule");
        assert_eq!(s.extends, Some("BaseWorkday".to_string()));
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_named_block_with_action() {
    // Named blocks capture a name, a time range, and an action path.
    let src = r#"
        schedule WorkSchedule {
            block work { 09:00 -> 17:00, action: WorkBehavior }
            block lunch { 12:00 -> 13:00, action: EatLunch }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.blocks.len(), 2);
        // First block: name, flags, action path, and parsed hours.
        assert_eq!(s.blocks[0].name, Some("work".to_string()));
        assert!(!s.blocks[0].is_override);
        assert_eq!(s.blocks[0].action, Some(vec!["WorkBehavior".to_string()]));
        assert_eq!(s.blocks[0].start.hour, 9);
        assert_eq!(s.blocks[0].end.hour, 17);
        // Second block.
        assert_eq!(s.blocks[1].name, Some("lunch".to_string()));
        assert_eq!(s.blocks[1].action, Some(vec!["EatLunch".to_string()]));
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_override_block() {
    // `override` blocks set the is_override flag; plain `block`s do not.
    let src = r#"
        schedule BakerSchedule extends BaseWorkday {
            override work { 05:00 -> 13:00, action: BakingWork }
            block prep { 03:00 -> 05:00, action: PrepBread }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.blocks.len(), 2);
        // The override block.
        assert_eq!(s.blocks[0].name, Some("work".to_string()));
        assert!(s.blocks[0].is_override);
        assert_eq!(s.blocks[0].action, Some(vec!["BakingWork".to_string()]));
        // The regular named block.
        assert_eq!(s.blocks[1].name, Some("prep".to_string()));
        assert!(!s.blocks[1].is_override);
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_recurrence_pattern() {
    // A recurrence binds a group of blocks to a day-of-week constraint.
    let src = r#"
        schedule WeeklySchedule {
            recurrence MarketDay on Earthday {
                block market { 08:00 -> 13:00, action: SellAtMarket }
                block restock { 14:00 -> 16:00, action: RestockGoods }
            }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.recurrences.len(), 1);
        let rec = &s.recurrences[0];
        assert_eq!(rec.name, "MarketDay");
        assert_eq!(
            rec.constraint,
            TemporalConstraint::DayOfWeek("Earthday".to_string())
        );
        assert_eq!(rec.blocks.len(), 2);
        // First block nested inside the recurrence.
        assert_eq!(rec.blocks[0].name, Some("market".to_string()));
        assert_eq!(rec.blocks[0].action, Some(vec!["SellAtMarket".to_string()]));
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_qualified_path_action() {
    // A block's `action:` accepts a `::`-qualified path, stored as its
    // individual segments.
    let input = r#"
        schedule WorkSchedule {
            block work { 09:00 -> 17:00, action: village::baker::BakingWork }
        }
    "#;
    let lexer = Lexer::new(input);
    let parser = FileParser::new();
    let result = parser.parse(lexer);
    assert!(result.is_ok(), "Failed to parse: {:?}", result.err());
    let file = result.unwrap();
    match &file.declarations[0] {
        | Declaration::Schedule(s) => {
            // Assert the block count first so a parse that drops the block
            // fails with a clear message instead of an index panic.
            assert_eq!(s.blocks.len(), 1);
            assert_eq!(
                s.blocks[0].action,
                Some(vec![
                    "village".to_string(),
                    "baker".to_string(),
                    "BakingWork".to_string()
                ])
            );
        },
        | _ => panic!("Expected Schedule declaration"),
    }
}
#[test]
fn test_mixed_blocks_and_recurrences() {
    // Plain blocks, recurrences, and override blocks can be mixed in one
    // schedule that also extends a parent.
    let src = r#"
        schedule CompleteSchedule extends BaseSchedule {
            block morning { 06:00 -> 12:00, action: MorningRoutine }
            recurrence Weekend on Spiritday {
                block leisure { 10:00 -> 18:00, action: RelaxAndPlay }
            }
            override evening { 18:00 -> 22:00, action: EveningRoutine }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.name, "CompleteSchedule");
        assert_eq!(s.extends, Some("BaseSchedule".to_string()));
        assert_eq!(s.blocks.len(), 2); // morning and override evening
        assert_eq!(s.recurrences.len(), 1); // Weekend
        // Both regular and override blocks must be present.
        assert!(s.blocks.iter().any(|b| !b.is_override));
        assert!(s.blocks.iter().any(|b| b.is_override));
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_block_with_fields() {
    // Extra key/value fields inside a block are kept separate from the
    // time range and action.
    let src = r#"
        schedule WorkSchedule {
            block work {
                09:00 -> 17:00
                action: WorkBehavior
                intensity: "high"
                place: "office"
            }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        // intensity and place land in fields; action is parsed separately.
        assert_eq!(s.blocks[0].fields.len(), 2);
        assert_eq!(s.blocks[0].action, Some(vec!["WorkBehavior".to_string()]));
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_empty_schedule() {
    // A schedule with an empty body is legal and yields no blocks or
    // recurrences.
    let src = r#"
        schedule EmptySchedule {
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.blocks.len(), 0);
        assert_eq!(s.recurrences.len(), 0);
    } else {
        panic!("Expected Schedule declaration");
    }
}
#[test]
fn test_schedule_only_recurrences() {
    // A schedule may consist solely of recurrences, with no top-level blocks.
    let src = r#"
        schedule EventSchedule {
            recurrence Festival on FirstDayOfSummer {
                block celebration { 10:00 -> 22:00, action: Celebrate }
            }
            recurrence MarketDay on Earthday {
                block market { 08:00 -> 14:00, action: TradingBehavior }
            }
        }
    "#;
    let parsed = FileParser::new().parse(Lexer::new(src));
    assert!(parsed.is_ok(), "Failed to parse: {:?}", parsed.err());
    let file = parsed.unwrap();
    if let Declaration::Schedule(s) = &file.declarations[0] {
        assert_eq!(s.blocks.len(), 0);
        assert_eq!(s.recurrences.len(), 2);
    } else {
        panic!("Expected Schedule declaration");
    }
}

View File

@@ -11,6 +11,7 @@
use std::collections::HashMap;
use crate::syntax::ast::{
BehaviorLink,
BehaviorNode,
Participant,
ProseBlock,
@@ -48,6 +49,8 @@ pub struct ResolvedCharacter {
pub species: Option<String>,
pub fields: HashMap<String, Value>,
pub prose_blocks: HashMap<String, ProseBlock>,
pub uses_behaviors: Option<Vec<BehaviorLink>>,
pub uses_schedule: Option<Vec<String>>,
pub span: Span,
}