release: Storybook v0.2.0 - Major syntax and features update

BREAKING CHANGES:
- Relationship syntax now requires blocks for all participants
- Removed self/other perspective blocks from relationships
- Replaced 'guard' keyword with 'if' for behavior tree decorators

Language Features:
- Add tree-sitter grammar with improved if/condition disambiguation
- Add comprehensive tutorial and reference documentation
- Add SBIR v0.2.0 binary format specification
- Add resource linking system for behaviors and schedules
- Add year-long schedule patterns (day, season, recurrence)
- Add behavior tree enhancements (named nodes, decorators)

Documentation:
- Complete tutorial series (9 chapters) with baker family examples
- Complete reference documentation for all language features
- SBIR v0.2.0 specification with binary format details
- Add locations and institutions documentation

Examples:
- Convert all examples to baker family scenario
- Add comprehensive working examples

Tooling:
- Zed extension with LSP integration
- Tree-sitter grammar for syntax highlighting
- Build scripts and development tools

Version Updates:
- Main package: 0.1.0 → 0.2.0
- Tree-sitter grammar: 0.1.0 → 0.2.0
- Zed extension: 0.1.0 → 0.2.0
- Storybook editor: 0.1.0 → 0.2.0
This commit is contained in:
2026-02-13 21:52:03 +00:00
parent 80332971b8
commit 16deb5d237
290 changed files with 90316 additions and 5827 deletions

159
src/lsp/behavior_tests.rs Normal file
View File

@@ -0,0 +1,159 @@
//! Tests for behavior tree support in LSP
#[cfg(test)]
mod tests {
    use crate::lsp::{
        document::Document,
        symbols,
    };

    /// Shared fixture exercising the main behavior-tree shapes: a bare action,
    /// a `choose` (selector) containing a nested `then` (sequence), a
    /// top-level sequence, and a multi-branch decision tree with a fallback.
    const BEHAVIOR_SAMPLE: &str = r#"
behavior SimpleBehavior {
walk_around
}
behavior FindFood {
choose {
check_hungry
then {
find_nearest_food
move_to_food
eat
}
}
}
behavior SocialBehavior {
then {
greet
small_talk
farewell
}
}
behavior ComplexDecisionTree {
choose {
then {
check_threat
flee_to_safety
}
then {
check_resources
gather_resources
}
idle
}
}
"#;

    /// The fixture parses cleanly and yields an AST.
    #[test]
    fn test_behavior_parsing() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        // Dump any parse errors first so a failure is diagnosable from output.
        if !doc.parse_errors.is_empty() {
            for err in &doc.parse_errors {
                eprintln!("Parse error: {}", err.message);
            }
        }
        assert!(doc.ast.is_some(), "Should parse behavior trees");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    /// All four behavior declarations are entered into the name table.
    #[test]
    fn test_behavior_symbols() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        // Should extract behavior declarations
        assert!(doc.name_table.resolve_name("SimpleBehavior").is_some());
        assert!(doc.name_table.resolve_name("FindFood").is_some());
        assert!(doc.name_table.resolve_name("SocialBehavior").is_some());
        assert!(doc.name_table.resolve_name("ComplexDecisionTree").is_some());
    }

    /// Behavior declarations resolve with `DeclKind::Behavior`.
    #[test]
    fn test_behavior_symbol_kinds() {
        use crate::resolve::names::DeclKind;
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        let find_food = doc.name_table.resolve_name("FindFood").unwrap();
        assert_eq!(find_food.kind, DeclKind::Behavior);
    }

    /// Behaviors surface as MODULE symbols in the document-symbol listing
    /// (the kind chosen by the symbol extractor for behaviors).
    #[test]
    fn test_behavior_in_document_symbols() {
        let doc = Document::new(BEHAVIOR_SAMPLE.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        // Should have behavior symbols
        let behaviors: Vec<_> = symbols
            .iter()
            .filter(|s| s.kind == tower_lsp::lsp_types::SymbolKind::MODULE)
            .collect();
        assert_eq!(behaviors.len(), 4, "Should have 4 behaviors");
        assert!(behaviors.iter().any(|b| b.name == "SimpleBehavior"));
        assert!(behaviors.iter().any(|b| b.name == "FindFood"));
        assert!(behaviors.iter().any(|b| b.name == "SocialBehavior"));
        assert!(behaviors.iter().any(|b| b.name == "ComplexDecisionTree"));
    }

    /// `include` subtree references parse inside a sequence node.
    #[test]
    fn test_behavior_with_subtrees() {
        let sample = r#"
behavior WithSubtrees {
then {
include helpers::check_preconditions
main_action
include helpers::cleanup
}
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse subtrees");
        assert!(doc.name_table.resolve_name("WithSubtrees").is_some());
    }

    /// A behavior whose body is a single bare action parses.
    #[test]
    fn test_behavior_simple_action() {
        let sample = r#"
behavior SimpleAction {
walk_around
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse simple action");
        assert!(doc.name_table.resolve_name("SimpleAction").is_some());
    }

    /// `choose` and `then` nodes parse, and both behaviors are registered.
    #[test]
    fn test_behavior_selectors_and_sequences() {
        let sample = r#"
behavior SelectorExample {
choose {
option_one
option_two
default_option
}
}
behavior SequenceExample {
then {
step_one
step_two
step_three
}
}
"#;
        let doc = Document::new(sample.to_string());
        assert!(doc.ast.is_some(), "Should parse selectors and sequences");
        assert_eq!(doc.name_table.all_entries().count(), 2);
    }
}

2578
src/lsp/code_actions.rs Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

775
src/lsp/completion.rs Normal file
View File

@@ -0,0 +1,775 @@
//! Autocomplete/completion provider
//!
//! Provides context-aware completion suggestions for:
//! - Keywords (filtered by context)
//! - Entity names (characters, templates, etc.)
//! - Field names (from templates/species when in character block)
//! - Type names (templates/species when after ':')
//! - Enum values
//! - Action names (in behavior trees)
use tower_lsp::lsp_types::{
CompletionItem,
CompletionItemKind,
CompletionList,
CompletionParams,
CompletionResponse,
Documentation,
MarkupContent,
MarkupKind,
};
use super::document::Document;
use crate::syntax::ast::Value;
/// Syntactic context at the cursor, used to decide which completion sets
/// (keywords, entities, types, fields) are offered.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CompletionContext {
    /// Top-level of the document
    TopLevel,
    /// Inside a character/template field block
    InFieldBlock,
    /// After a colon (expecting a type or value)
    AfterColon,
    /// Inside a behavior tree
    InBehavior,
    /// Inside a life arc
    InLifeArc,
    /// Inside a relationship
    InRelationship,
    /// Unknown context
    Unknown,
}
/// Get completion items at a position
pub fn get_completions(doc: &Document, params: &CompletionParams) -> Option<CompletionResponse> {
let position = params.text_document_position.position;
// Check for field accessor using the specialized function
// It will return Some only if there's an identifier followed by a dot
if let Some(field_items) = get_field_accessor_completions(doc, position) {
return Some(CompletionResponse::List(CompletionList {
is_incomplete: false,
items: field_items,
}));
}
// Convert position to byte offset for context-based completions
let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
// Check if we're typing a new identifier name after a declaration keyword
if is_typing_declaration_name(&doc.text, offset) {
// Don't show completions when typing a new identifier
return None;
}
// Determine context by analyzing text around cursor
let context = determine_context(&doc.text, offset);
let mut items = Vec::new();
match context {
| CompletionContext::TopLevel => {
// At top level, suggest declaration keywords
items.extend(top_level_keyword_completions());
// Also suggest existing entity names for reference
items.extend(entity_completions(doc));
},
| CompletionContext::InFieldBlock => {
// Inside a field block, suggest fields from the species/templates
if let Some(species_fields) = get_contextual_field_completions(doc, offset) {
items.extend(species_fields);
} else {
// Fallback to generic field keywords if we can't determine context
items.extend(field_keyword_completions());
}
},
| CompletionContext::AfterColon => {
// After colon, suggest types (templates, species)
items.extend(type_completions(doc));
},
| CompletionContext::InBehavior => {
// In behavior tree, suggest behavior-specific keywords
items.extend(behavior_keyword_completions());
items.extend(behavior_completions(doc)); // Reference to other
// behaviors
},
| CompletionContext::InLifeArc => {
// In life arc, suggest state-related keywords
items.extend(life_arc_keyword_completions());
},
| CompletionContext::InRelationship => {
// In relationship, suggest relationship-specific keywords
items.extend(relationship_keyword_completions());
items.extend(character_completions(doc)); // For participants
},
| CompletionContext::Unknown => {
// When context is unclear, provide all completions
items.extend(all_keyword_completions());
items.extend(entity_completions(doc));
},
}
// Set sort_text for proper ordering: field accessors first (0xxx), then others
// (1xxx)
for item in &mut items {
let detail = item.detail.as_deref().unwrap_or("");
let is_field = detail.contains("field") || detail.contains("trait");
// Field accessors get "0" prefix, others get "1" prefix
let prefix = if is_field { "0" } else { "1" };
item.sort_text = Some(format!("{}{}", prefix, item.label));
}
// Sort by sort_text for consistent ordering
items.sort_by(|a, b| {
let sort_a = a.sort_text.as_deref().unwrap_or(&a.label);
let sort_b = b.sort_text.as_deref().unwrap_or(&b.label);
sort_a.cmp(sort_b)
});
Some(CompletionResponse::Array(items))
}
/// Convert LSP position to byte offset
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
let line_start = doc.positions.line_offset(line)?;
Some(line_start + character)
}
/// Check if we're typing a new identifier name after a declaration keyword
fn is_typing_declaration_name(text: &str, offset: usize) -> bool {
use crate::syntax::lexer::{
Lexer,
Token,
};
// Get text before cursor (up to 200 chars)
let start = offset.saturating_sub(200);
let before = &text[start..offset.min(text.len())];
// Tokenize using lexer
let lexer = Lexer::new(before);
let tokens: Vec<_> = lexer.collect();
// Check if the last token (or second-to-last if we just typed an identifier)
// is a declaration keyword
if !tokens.is_empty() {
let last_idx = tokens.len() - 1;
// Check last token
if let (_offset, Token::Ident(keyword), _end) = &tokens[last_idx] {
if matches!(
keyword.as_str(),
"character" |
"template" |
"species" |
"behavior" |
"life_arc" |
"relationship" |
"institution" |
"location" |
"enum" |
"schedule"
) {
return true;
}
}
// Check second-to-last token (in case we're in the middle of typing an
// identifier)
if tokens.len() >= 2 {
let second_last_idx = tokens.len() - 2;
if let (_offset, Token::Ident(keyword), _end) = &tokens[second_last_idx] {
if matches!(
keyword.as_str(),
"character" |
"template" |
"species" |
"behavior" |
"life_arc" |
"relationship" |
"institution" |
"location" |
"enum" |
"schedule"
) {
// Make sure the last token is an identifier (not a colon or brace)
if let (_offset, Token::Ident(_), _end) = &tokens[last_idx] {
return true;
}
}
}
}
}
false
}
/// Format a value as its type string for documentation
fn format_value_type(value: &Value) -> String {
match value {
| Value::Identifier(path) => path.join("."),
| Value::String(_) => "String".to_string(),
| Value::Int(_) => "Int".to_string(),
| Value::Float(_) => "Float".to_string(),
| Value::Bool(_) => "Bool".to_string(),
| Value::List(items) => {
if items.is_empty() {
"List".to_string()
} else {
format!("[{}]", format_value_type(&items[0]))
}
},
| Value::Object(_) => "Object".to_string(),
| Value::Range(start, end) => {
format!("{}..{}", format_value_type(start), format_value_type(end))
},
| Value::Time(_) => "Time".to_string(),
| Value::Duration(_) => "Duration".to_string(),
| Value::ProseBlock(_) => "ProseBlock".to_string(),
| Value::Override(_) => "Override".to_string(),
}
}
/// Get field completions based on the current character/template context
fn get_contextual_field_completions(doc: &Document, offset: usize) -> Option<Vec<CompletionItem>> {
use crate::{
resolve::names::DeclKind,
syntax::ast::Declaration,
};
let ast = doc.ast.as_ref()?;
// Find which declaration contains the cursor offset
for decl in &ast.declarations {
match decl {
| Declaration::Character(character) => {
// Check if cursor is inside this character block
if offset >= character.span.start && offset <= character.span.end {
let mut items = Vec::new();
// Add special keywords
items.push(simple_item(
"from",
"Apply a template",
"from ${1:TemplateName}",
));
items.push(simple_item(
"include",
"Include a template",
"include ${1:TemplateName}",
));
// Add fields from species
if let Some(ref species_name) = character.species {
if let Some(species_entry) = doc.name_table.resolve_name(species_name) {
if species_entry.kind == DeclKind::Species {
for species_decl in &ast.declarations {
if let Declaration::Species(species) = species_decl {
if &species.name == species_name {
for field in &species.fields {
items.push(CompletionItem {
label: format!("{}:", field.name),
kind: Some(CompletionItemKind::FIELD),
detail: Some(format!("({})", species_name)),
insert_text: Some(format!("{}: $0", field.name)),
insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
..Default::default()
});
}
}
}
}
}
}
}
return Some(items);
}
},
| Declaration::Template(template) => {
// Check if cursor is inside this template block
if offset >= template.span.start && offset <= template.span.end {
let mut items = Vec::new();
// Add special keywords for templates
items.push(simple_item(
"include",
"Include a template",
"include ${1:TemplateName}",
));
// Templates can suggest common field patterns
return Some(items);
}
},
| _ => {},
}
}
None
}
/// Get field completions when triggered by `.` using lexer
fn get_field_accessor_completions(
doc: &Document,
position: tower_lsp::lsp_types::Position,
) -> Option<Vec<CompletionItem>> {
use crate::{
resolve::names::DeclKind,
syntax::{
ast::Declaration,
lexer::{
Lexer,
Token,
},
},
};
// Lex the line up to the cursor to find the identifier before the dot
let line_offset = doc.positions.line_offset(position.line as usize)?;
let line_end = (line_offset + position.character as usize).min(doc.text.len());
let line_text = &doc.text[line_offset..line_end];
// Lex tokens on this line
let lexer = Lexer::new(line_text);
let tokens: Vec<_> = lexer.collect();
// Check if there's a dot token - if not, this isn't a field accessor
let has_dot = tokens
.iter()
.any(|(_, token, _)| matches!(token, Token::Dot));
if !has_dot {
return None;
}
// Find the last identifier before the last dot
let mut last_ident = None;
for (_start, token, _end) in &tokens {
match token {
| Token::Ident(name) => last_ident = Some(name.clone()),
| Token::Dot => {
// We found a dot - if we have an identifier, that's our target
if last_ident.is_some() {
break;
}
},
| _ => {},
}
}
// If there's a dot but no identifier, return empty list to block keywords
let identifier = match last_ident {
| Some(id) => id,
| None => return Some(Vec::new()),
};
// Look up the identifier - if it fails, still return empty to block keywords
let entry = match doc.name_table.resolve_name(&identifier) {
| Some(e) => e,
| None => return Some(Vec::new()),
};
let ast = match doc.ast.as_ref() {
| Some(a) => a,
| None => return Some(Vec::new()),
};
let mut items = Vec::new();
match entry.kind {
| DeclKind::Character => {
for decl in &ast.declarations {
if let Declaration::Character(character) = decl {
if character.name == identifier {
// Add character's own fields
for field in &character.fields {
let value_type = format_value_type(&field.value);
items.push(CompletionItem {
label: field.name.clone(),
kind: Some(CompletionItemKind::FIELD),
detail: None, // Keep inline display clean
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: format!(
"**Field** of `{}`\n\nType: `{}`",
identifier, value_type
),
})),
..Default::default()
});
}
// Add species fields
if let Some(ref species_name) = character.species {
if let Some(species_entry) = doc.name_table.resolve_name(species_name) {
if species_entry.kind == DeclKind::Species {
for decl in &ast.declarations {
if let Declaration::Species(species) = decl {
if &species.name == species_name {
for field in &species.fields {
let value_type =
format_value_type(&field.value);
items.push(CompletionItem {
label: field.name.clone(),
kind: Some(CompletionItemKind::FIELD),
detail: Some(format!("({})", species_name)),
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: format!("**Trait** from `{}`\n\nType: `{}`", species_name, value_type),
})),
..Default::default()
});
}
}
}
}
}
}
}
// Add template fields
if let Some(ref template_names) = character.template {
for template_name in template_names {
if let Some(template_entry) =
doc.name_table.resolve_name(template_name)
{
if template_entry.kind == DeclKind::Template {
for decl in &ast.declarations {
if let Declaration::Template(template) = decl {
if &template.name == template_name {
for field in &template.fields {
let value_type =
format_value_type(&field.value);
items.push(CompletionItem {
label: field.name.clone(),
kind: Some(CompletionItemKind::FIELD),
detail: Some(format!("({})", template_name)),
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: format!("**Template field** from `{}`\n\nType: `{}`", template_name, value_type),
})),
..Default::default()
});
}
}
}
}
}
}
}
}
break;
}
}
}
},
// For non-character declarations, still return empty list to block keywords
| _ => return Some(Vec::new()),
}
// Always return Some to block keyword completions, even if no fields found
items.sort_by(|a, b| a.label.cmp(&b.label));
Some(items)
}
/// Determine completion context by analyzing tokens around cursor using lexer
fn determine_context(text: &str, offset: usize) -> CompletionContext {
use crate::syntax::lexer::{
Lexer,
Token,
};
// Get text before cursor (up to 500 chars for context)
let start = offset.saturating_sub(500);
let before = &text[start..offset.min(text.len())];
// Tokenize using lexer
let lexer = Lexer::new(before);
let tokens: Vec<_> = lexer.collect();
// Track state by analyzing tokens
let mut nesting_level: i32 = 0;
let mut last_keyword = None;
let mut seen_colon_without_brace = false;
for (_offset, token, _end) in &tokens {
match token {
| Token::LBrace => nesting_level += 1,
| Token::RBrace => nesting_level = nesting_level.saturating_sub(1),
| Token::Colon => {
// Mark that we've seen a colon
seen_colon_without_brace = true;
},
| Token::LBrace if seen_colon_without_brace => {
// Opening brace after colon - we've entered the block
seen_colon_without_brace = false;
},
| Token::Ident(keyword)
if matches!(
keyword.as_str(),
"character" |
"template" |
"species" |
"behavior" |
"life_arc" |
"relationship" |
"institution" |
"location" |
"enum" |
"schedule"
) =>
{
last_keyword = Some(keyword.clone());
seen_colon_without_brace = false;
},
| _ => {},
}
}
// If we saw a colon without a brace after it, we're in type position
if seen_colon_without_brace {
return CompletionContext::AfterColon;
}
// At top level if no nesting
if nesting_level == 0 {
return CompletionContext::TopLevel;
}
// Determine context based on last keyword and nesting
match last_keyword.as_deref() {
| Some("behavior") if nesting_level > 0 => CompletionContext::InBehavior,
| Some("life_arc") if nesting_level > 0 => CompletionContext::InLifeArc,
| Some("relationship") if nesting_level > 0 => CompletionContext::InRelationship,
| Some("character" | "template" | "species" | "institution" | "location")
if nesting_level > 0 =>
{
CompletionContext::InFieldBlock
},
| _ => CompletionContext::Unknown,
}
}
/// Get entity completions (all symbols)
fn entity_completions(doc: &Document) -> Vec<CompletionItem> {
use crate::resolve::names::DeclKind;
let mut items = Vec::new();
for entry in doc.name_table.all_entries() {
let kind = match entry.kind {
| DeclKind::Character => CompletionItemKind::CLASS,
| DeclKind::Template => CompletionItemKind::INTERFACE,
| DeclKind::LifeArc => CompletionItemKind::FUNCTION,
| DeclKind::Schedule => CompletionItemKind::EVENT,
| DeclKind::Behavior => CompletionItemKind::MODULE,
| DeclKind::Institution => CompletionItemKind::MODULE,
| DeclKind::Relationship => CompletionItemKind::STRUCT,
| DeclKind::Location => CompletionItemKind::CONSTANT,
| DeclKind::Species => CompletionItemKind::CLASS,
| DeclKind::Enum => CompletionItemKind::ENUM,
};
let name = entry
.qualified_path
.last()
.unwrap_or(&String::new())
.clone();
items.push(CompletionItem {
label: name,
kind: Some(kind),
detail: Some(format!("{:?}", entry.kind)),
..Default::default()
});
}
items
}
/// Get type completions (templates and species)
fn type_completions(doc: &Document) -> Vec<CompletionItem> {
use crate::resolve::names::DeclKind;
let mut items = Vec::new();
for entry in doc.name_table.all_entries() {
match entry.kind {
| DeclKind::Template | DeclKind::Species => {
let name = entry
.qualified_path
.last()
.unwrap_or(&String::new())
.clone();
items.push(CompletionItem {
label: name,
kind: Some(CompletionItemKind::INTERFACE),
detail: Some(format!("{:?}", entry.kind)),
documentation: Some(Documentation::String("Type annotation".to_string())),
..Default::default()
});
},
| _ => {},
}
}
items
}
/// Get behavior completions
fn behavior_completions(doc: &Document) -> Vec<CompletionItem> {
use crate::resolve::names::DeclKind;
let mut items = Vec::new();
for entry in doc.name_table.entries_of_kind(DeclKind::Behavior) {
let name = entry
.qualified_path
.last()
.unwrap_or(&String::new())
.clone();
items.push(CompletionItem {
label: format!("@{}", name),
kind: Some(CompletionItemKind::REFERENCE),
detail: Some("Behavior tree reference".to_string()),
insert_text: Some(format!("@{}", name)),
..Default::default()
});
}
items
}
/// Get character completions
fn character_completions(doc: &Document) -> Vec<CompletionItem> {
use crate::resolve::names::DeclKind;
let mut items = Vec::new();
for entry in doc.name_table.entries_of_kind(DeclKind::Character) {
let name = entry
.qualified_path
.last()
.unwrap_or(&String::new())
.clone();
items.push(CompletionItem {
label: name,
kind: Some(CompletionItemKind::CLASS),
detail: Some("Character".to_string()),
..Default::default()
});
}
items
}
/// Get all keyword completions (fallback)
fn all_keyword_completions() -> Vec<CompletionItem> {
let mut items = top_level_keyword_completions();
items.extend(field_keyword_completions());
items.extend(behavior_keyword_completions());
items.extend(life_arc_keyword_completions());
items.extend(relationship_keyword_completions());
items
}
/// Get top-level declaration keywords
fn top_level_keyword_completions() -> Vec<CompletionItem> {
vec![
keyword_item("character", "Define a character entity", "character ${1:Name}: ${2:Species} {\n $0\n}"),
keyword_item("template", "Define a reusable field template", "template ${1:Name} {\n $0\n}"),
keyword_item("life_arc", "Define a state machine", "life_arc ${1:Name} {\n state ${2:initial} {\n $0\n }\n}"),
keyword_item("schedule", "Define a daily schedule", "schedule ${1:Name} {\n ${2:08:00} -> ${3:09:00}: ${4:block_name} {\n $0\n }\n}"),
keyword_item("behavior", "Define a behavior tree", "behavior ${1:Name} {\n $0\n}"),
keyword_item("institution", "Define an organization", "institution ${1:Name} {\n $0\n}"),
keyword_item("relationship", "Define a relationship", "relationship ${1:Name} {\n $0\n}"),
keyword_item("location", "Define a location", "location ${1:Name} {\n $0\n}"),
keyword_item("species", "Define a species", "species ${1:Name} {\n $0\n}"),
keyword_item("enum", "Define an enumeration", "enum ${1:Name} {\n ${2:Value1}\n ${3:Value2}\n}"),
keyword_item("use", "Import declarations", "use ${1:path::to::item};"),
]
}
/// Get field-level keywords
fn field_keyword_completions() -> Vec<CompletionItem> {
vec![
keyword_item("from", "Apply a template", "from ${1:TemplateName}"),
keyword_item("include", "Include a template", "include ${1:TemplateName}"),
keyword_item("strict", "Enforce strict template fields", "strict"),
// Common field names
simple_item("age", "Age field", "age: ${1:0}"),
simple_item("name", "Name field", "name: \"${1:Name}\""),
simple_item("bond", "Bond trait (0.0-1.0)", "bond: ${1:0.5}"),
simple_item("trust", "Trust trait (0.0-1.0)", "trust: ${1:0.5}"),
simple_item("love", "Love trait (0.0-1.0)", "love: ${1:0.5}"),
]
}
/// Get behavior tree keywords
fn behavior_keyword_completions() -> Vec<CompletionItem> {
vec![
keyword_item(
"?",
"Selector node (try options in order)",
"? {\n $0\n}",
),
keyword_item(">", "Sequence node (execute in order)", "> {\n $0\n}"),
keyword_item("*", "Repeat node (loop forever)", "* {\n $0\n}"),
simple_item("@", "Subtree reference", "@${1:behavior::name}"),
]
}
/// Get life arc keywords
fn life_arc_keyword_completions() -> Vec<CompletionItem> {
vec![
keyword_item(
"state",
"Define a life arc state",
"state ${1:name} {\n $0\n}",
),
keyword_item(
"on",
"Define a transition",
"on ${1:condition} -> ${2:target_state}",
),
]
}
/// Get relationship keywords
fn relationship_keyword_completions() -> Vec<CompletionItem> {
vec![
keyword_item(
"as",
"Define participant role",
"${1:CharacterName} as ${2:role} {\n $0\n}",
),
keyword_item("self", "Reference self in relationships", "self.${1:field}"),
keyword_item("other", "Reference other participant", "other.${1:field}"),
]
}
/// Build a KEYWORD completion item whose snippet is inserted verbatim and
/// whose hover documentation is rendered as Markdown.
fn keyword_item(label: &str, detail: &str, snippet: &str) -> CompletionItem {
    let docs = MarkupContent {
        kind: MarkupKind::Markdown,
        value: format!("**{}**\n\n{}", label, detail),
    };
    CompletionItem {
        label: label.to_owned(),
        kind: Some(CompletionItemKind::KEYWORD),
        detail: Some(detail.to_owned()),
        documentation: Some(Documentation::MarkupContent(docs)),
        insert_text: Some(snippet.to_owned()),
        insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
        ..Default::default()
    }
}
/// Build a PROPERTY completion item with a snippet body and no Markdown docs.
fn simple_item(label: &str, detail: &str, snippet: &str) -> CompletionItem {
    CompletionItem {
        label: label.to_owned(),
        kind: Some(CompletionItemKind::PROPERTY),
        detail: Some(detail.to_owned()),
        insert_text: Some(snippet.to_owned()),
        insert_text_format: Some(tower_lsp::lsp_types::InsertTextFormat::SNIPPET),
        ..Default::default()
    }
}

282
src/lsp/completion_tests.rs Normal file
View File

@@ -0,0 +1,282 @@
//! Tests for context-aware completion
#[cfg(test)]
mod tests {
use tower_lsp::lsp_types::{
CompletionParams,
Position,
TextDocumentIdentifier,
TextDocumentPositionParams,
Url,
};
use crate::lsp::{
completion,
document::Document,
};
fn make_params(line: u32, character: u32) -> CompletionParams {
CompletionParams {
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: Url::parse("file:///test.sb").unwrap(),
},
position: Position { line, character },
},
work_done_progress_params: Default::default(),
partial_result_params: Default::default(),
context: None,
}
}
#[test]
fn test_top_level_completions() {
let doc = Document::new("".to_string());
let params = make_params(0, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have top-level keywords
assert!(items.iter().any(|item| item.label == "character"));
assert!(items.iter().any(|item| item.label == "template"));
assert!(items.iter().any(|item| item.label == "behavior"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_field_block_completions() {
let source = "character Alice {\n \n}";
let doc = Document::new(source.to_string());
// Position inside the character block (line 1, after spaces)
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have field-related keywords
assert!(items.iter().any(|item| item.label == "from"));
assert!(items.iter().any(|item| item.label == "age"));
assert!(items.iter().any(|item| item.label == "bond"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_completions_include_templates() {
// Test that templates show up in completions
let source = "template Child { age: number }\n";
let doc = Document::new(source.to_string());
let params = make_params(1, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should include Child template in completions
assert!(
items.iter().any(|item| item.label == "Child"),
"Should have Child template in completions"
);
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_behavior_completions() {
let source = "behavior Test {\n \n}";
let doc = Document::new(source.to_string());
// Position inside behavior block
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have behavior tree keywords
assert!(items.iter().any(|item| item.label == "?"));
assert!(items.iter().any(|item| item.label == ">"));
assert!(items.iter().any(|item| item.label == "*"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_life_arc_completions() {
let source = "life_arc Growing {\n \n}";
let doc = Document::new(source.to_string());
// Position inside life arc block
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have life arc keywords
assert!(items.iter().any(|item| item.label == "state"));
assert!(items.iter().any(|item| item.label == "on"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_relationship_completions() {
let source = "relationship Friends {\n \n}";
let doc = Document::new(source.to_string());
// Position inside relationship block
let params = make_params(1, 4);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have relationship keywords
assert!(items.iter().any(|item| item.label == "as"));
assert!(items.iter().any(|item| item.label == "self"));
assert!(items.iter().any(|item| item.label == "other"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_type_suggestions_in_completions() {
// More complete example with proper syntax
let source = r#"template Child { age: number }
species Human {}
character Alice: Child {}
character Bob {}"#;
let doc = Document::new(source.to_string());
// Just check that templates and species are in completions
let params = make_params(0, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Should have Child and Human in completions
assert!(items.iter().any(|item| item.label == "Child"));
assert!(items.iter().any(|item| item.label == "Human"));
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_behavior_reference_in_symbols() {
// Check that behaviors are in symbol table and show up in completions
let source = "behavior WalkAround { patrol }\nbehavior Main { idle }";
let doc = Document::new(source.to_string());
let params = make_params(0, 0);
let result = completion::get_completions(&doc, &params);
assert!(result.is_some());
if let Some(response) = result {
match response {
| tower_lsp::lsp_types::CompletionResponse::Array(items) => {
// Behaviors should be in completions
assert!(
items.iter().any(|item| item.label.contains("WalkAround")),
"Should have WalkAround in completions"
);
assert!(
items.iter().any(|item| item.label.contains("Main")),
"Should have Main in completions"
);
},
| _ => panic!("Expected array response"),
}
}
}
#[test]
fn test_snippet_format_in_completions() {
    // Even an empty document offers keyword snippets.
    let document = Document::new("".to_string());

    let completions = completion::get_completions(&document, &make_params(0, 0));
    assert!(completions.is_some());
    match completions {
        Some(tower_lsp::lsp_types::CompletionResponse::Array(items)) => {
            // The `character` snippet must carry snippet-formatted insert text.
            let snippet = items.iter().find(|item| item.label == "character");
            assert!(snippet.is_some());
            if let Some(item) = snippet {
                assert!(item.insert_text.is_some());
                assert!(item.insert_text_format.is_some());
                // `${` marks an LSP snippet placeholder.
                assert!(item.insert_text.as_ref().unwrap().contains("${"));
            }
        },
        _ => panic!("Expected array response"),
    }
}
#[test]
fn test_no_duplicate_completions() {
    // Two characters share one name. NameTable::from_file() rejects this,
    // leaving an empty name table, so the duplicate name yields no
    // completions at all — duplicates surface as validation errors instead.
    let source = "character Alice {}\ncharacter Alice {}"; // Duplicate name
    let document = Document::new(source.to_string());
    assert!(
        !document.resolve_errors.is_empty(),
        "Should have validation error for duplicate"
    );

    let completions = completion::get_completions(&document, &make_params(0, 0));
    assert!(completions.is_some());
    match completions {
        Some(tower_lsp::lsp_types::CompletionResponse::Array(items)) => {
            let alice_count = items.iter().filter(|item| item.label == "Alice").count();
            assert_eq!(
                alice_count, 0,
                "Should have no completions when there are duplicate definitions"
            );
        },
        _ => panic!("Expected array response"),
    }
}
}

55
src/lsp/definition.rs Normal file
View File

@@ -0,0 +1,55 @@
//! Go-to-definition provider
//!
//! Allows jumping to the definition of symbols
use tower_lsp::lsp_types::{
GotoDefinitionParams,
GotoDefinitionResponse,
Location,
Range,
Url,
};
use super::document::Document;
/// Get the definition location for a symbol at a position
///
/// Resolves the identifier under the cursor through the document's name
/// table and maps the declaration's byte span back to an LSP range.
pub fn get_definition(
    doc: &Document,
    params: &GotoDefinitionParams,
    uri: &Url,
) -> Option<GotoDefinitionResponse> {
    // Cursor position -> byte offset -> identifier under the cursor.
    let cursor = params.text_document_position_params.position;
    let offset = position_to_offset(doc, cursor.line as usize, cursor.character as usize)?;
    let word = doc.word_at_offset(offset)?;

    // A known name resolves to the span of its declaration.
    let entry = doc.name_table.resolve_name(&word)?;

    // `offset_to_position` needs a mutable tracker, so work on a clone.
    let mut tracker = doc.positions.clone();
    let (line_a, col_a) = tracker.offset_to_position(entry.span.start);
    let (line_b, col_b) = tracker.offset_to_position(entry.span.end);
    let to_pos = |line: usize, col: usize| tower_lsp::lsp_types::Position {
        line: line as u32,
        character: col as u32,
    };

    Some(GotoDefinitionResponse::Scalar(Location {
        uri: uri.clone(),
        range: Range {
            start: to_pos(line_a, col_a),
            end: to_pos(line_b, col_b),
        },
    }))
}
/// Convert LSP position to byte offset
///
/// NOTE(review): `character` is added as a byte count; multi-byte characters
/// earlier on the line would skew this — confirm that is acceptable here.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    doc.positions.line_offset(line).map(|start| start + character)
}

163
src/lsp/diagnostics.rs Normal file
View File

@@ -0,0 +1,163 @@
//! Diagnostics conversion from Storybook errors to LSP diagnostics
use tower_lsp::lsp_types::{
Diagnostic,
DiagnosticSeverity,
Position,
Range,
};
use crate::syntax::lexer::{
Lexer,
Token,
};
/// Compute diagnostics for a document
///
/// Runs the lightweight lexer-based syntax check; an `Err` carries the
/// diagnostics to report, `Ok` means nothing to flag. (A full parser
/// integration would replace this.)
pub fn compute_diagnostics(text: &str) -> Vec<Diagnostic> {
    match try_parse(text) {
        Ok(()) => Vec::new(),
        Err(found) => found,
    }
}
/// Attempt to parse the document and return diagnostics
///
/// TODO: Integrate with the actual parser once Span tracking lands. For now
/// this performs only a brace-balance check. Because it walks lexer tokens
/// rather than raw characters, braces inside strings and comments are
/// correctly ignored.
fn try_parse(text: &str) -> Result<(), Vec<Diagnostic>> {
    let mut errors = Vec::new();
    // Current brace nesting depth. (The previous implementation kept a stack
    // of opening-brace offsets, but the stored offsets were never read — a
    // plain counter is equivalent.)
    let mut depth: usize = 0;
    for (offset, token, _end) in Lexer::new(text) {
        match token {
            Token::LBrace => depth += 1,
            Token::RBrace => {
                if depth == 0 {
                    // A closing brace with no matching opener is always an error.
                    let pos = byte_offset_to_position(text, offset);
                    errors.push(Diagnostic {
                        range: Range {
                            start: pos,
                            end: Position {
                                line: pos.line,
                                character: pos.character + 1,
                            },
                        },
                        severity: Some(DiagnosticSeverity::ERROR),
                        code: None,
                        source: Some("storybook".to_string()),
                        message: "Unexpected closing brace".to_string(),
                        related_information: None,
                        tags: None,
                        code_description: None,
                        data: None,
                    });
                } else {
                    depth -= 1;
                }
            },
            _ => {},
        }
    }
    // Unclosed opening braces (depth > 0 at EOF) are deliberately not
    // reported: they are common in incomplete/in-progress code.
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Return the 0-based line index containing the given byte offset: the
/// number of newline characters that start before `offset`. Offsets past the
/// end of `text` yield the last line.
///
/// NOTE(review): this helper has no callers in this module — candidate for
/// removal or `#[allow(dead_code)]`; verify before deleting.
fn byte_offset_to_line(text: &str, offset: usize) -> usize {
    text.char_indices()
        .take_while(|&(idx, _)| idx < offset)
        .filter(|&(_, ch)| ch == '\n')
        .count()
}
/// Convert a byte offset to line/column position
/// This is a placeholder - will be replaced when we have proper Span tracking
///
/// NOTE(review): the column is a character count, not UTF-16 code units as
/// the LSP spec's default encoding expects — confirm this matches the
/// client's negotiated position encoding.
pub fn byte_offset_to_position(text: &str, offset: usize) -> Position {
    let mut line = 0usize;
    let mut character = 0usize;
    let mut consumed = 0usize;
    for ch in text.chars() {
        // Stop once we reach (or pass) the requested byte offset.
        if consumed >= offset {
            break;
        }
        consumed += ch.len_utf8();
        if ch == '\n' {
            line += 1;
            character = 0;
        } else {
            character += 1;
        }
    }
    Position {
        line: line as u32,
        character: character as u32,
    }
}
/// Create a diagnostic from a span and message
///
/// The `[start, end)` byte offsets are translated to LSP positions and the
/// diagnostic is tagged with the "storybook" source.
pub fn create_diagnostic(
    text: &str,
    start: usize,
    end: usize,
    message: String,
    severity: DiagnosticSeverity,
) -> Diagnostic {
    let range = Range {
        start: byte_offset_to_position(text, start),
        end: byte_offset_to_position(text, end),
    };
    Diagnostic {
        range,
        severity: Some(severity),
        code: None,
        code_description: None,
        source: Some("storybook".to_string()),
        message,
        related_information: None,
        tags: None,
        data: None,
    }
}

View File

@@ -0,0 +1,271 @@
//! Tests for diagnostics functionality
#[cfg(test)]
mod tests {
    use tower_lsp::lsp_types::DiagnosticSeverity;

    use crate::lsp::{
        diagnostics,
        document::Document,
    };

    // --- Parse errors surfaced through Document -----------------------------

    #[test]
    fn test_parse_error_diagnostics() {
        let invalid = "character { missing name }";
        let doc = Document::new(invalid.to_string());
        assert!(!doc.parse_errors.is_empty(), "Should have parse errors");
        assert!(doc.ast.is_none(), "Should not have AST");
    }

    #[test]
    fn test_multiple_parse_errors() {
        let invalid = r#"
character {
}
template {
}
invalid syntax here
"#;
        let doc = Document::new(invalid.to_string());
        // Should report errors (may be combined into one or multiple)
        assert!(!doc.parse_errors.is_empty());
    }

    #[test]
    fn test_no_errors_for_valid_code() {
        let valid = "character Alice { age: 7 }";
        let doc = Document::new(valid.to_string());
        assert!(
            doc.parse_errors.is_empty(),
            "Valid code should have no errors"
        );
        assert!(doc.ast.is_some(), "Should have parsed AST");
    }

    #[test]
    fn test_error_positions() {
        let invalid = "character Alice { age: }"; // Missing value
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            // Error should have valid position
            assert!(error.start < invalid.len());
            assert!(error.end <= invalid.len());
            assert!(error.start <= error.end);
        }
    }

    #[test]
    fn test_error_messages_are_descriptive() {
        let invalid = "character Alice { age: }";
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            assert!(
                !error.message.is_empty(),
                "Error message should not be empty"
            );
            assert!(
                error.message.len() > 10,
                "Error message should be descriptive"
            );
        }
    }

    #[test]
    fn test_error_severity() {
        use crate::lsp::document::ErrorSeverity;
        let invalid = "character { }";
        let doc = Document::new(invalid.to_string());
        if !doc.parse_errors.is_empty() {
            let error = &doc.parse_errors[0];
            // Parse errors should be Error severity
            assert!(matches!(error.severity, ErrorSeverity::Error));
        }
    }

    #[test]
    fn test_unclosed_brace_error() {
        let invalid = "character Alice { age: 7";
        let doc = Document::new(invalid.to_string());
        assert!(!doc.parse_errors.is_empty(), "Should detect unclosed brace");
    }

    #[test]
    fn test_unexpected_token_error() {
        let invalid = "character Alice } age: 7 {"; // Backwards braces
        let doc = Document::new(invalid.to_string());
        assert!(
            !doc.parse_errors.is_empty(),
            "Should detect unexpected tokens"
        );
    }

    #[test]
    fn test_invalid_field_syntax() {
        let invalid = "character Alice { age = 7 }"; // Wrong operator
        let doc = Document::new(invalid.to_string());
        // May or may not parse depending on parser flexibility
        // Just verify we handle it gracefully
        assert!(doc.ast.is_some() || !doc.parse_errors.is_empty());
    }

    // --- Degenerate inputs are valid -----------------------------------------

    #[test]
    fn test_empty_input() {
        let doc = Document::new("".to_string());
        // Empty input is valid - just no declarations
        assert!(doc.parse_errors.is_empty());
    }

    #[test]
    fn test_whitespace_only() {
        let doc = Document::new(" \n\n\t\t ".to_string());
        // Whitespace only is valid
        assert!(doc.parse_errors.is_empty());
    }

    #[test]
    fn test_comments_only() {
        let doc = Document::new("// Just comments\n// Nothing else".to_string());
        // Comments only is valid
        assert!(doc.parse_errors.is_empty());
    }

    // Tests for diagnostics utility functions

    #[test]
    fn test_byte_offset_to_position_start() {
        let text = "hello world";
        let pos = diagnostics::byte_offset_to_position(text, 0);
        assert_eq!(pos.line, 0);
        assert_eq!(pos.character, 0);
    }

    #[test]
    fn test_byte_offset_to_position_middle() {
        let text = "hello world";
        let pos = diagnostics::byte_offset_to_position(text, 6);
        assert_eq!(pos.line, 0);
        assert_eq!(pos.character, 6);
    }

    #[test]
    fn test_byte_offset_to_position_multiline() {
        let text = "line 1\nline 2\nline 3";
        let pos = diagnostics::byte_offset_to_position(text, 7); // Start of line 2
        assert_eq!(pos.line, 1);
        assert_eq!(pos.character, 0);
    }

    #[test]
    fn test_byte_offset_to_position_beyond_text() {
        let text = "short";
        let pos = diagnostics::byte_offset_to_position(text, 1000);
        // Should not panic, returns position at end (line is always valid u32)
        assert!(pos.line == 0 || pos.line > 0);
    }

    #[test]
    fn test_create_diagnostic() {
        let text = "character Alice {}";
        let diag = diagnostics::create_diagnostic(
            text,
            0,
            9,
            "Test message".to_string(),
            DiagnosticSeverity::ERROR,
        );
        assert_eq!(diag.message, "Test message");
        assert_eq!(diag.severity, Some(DiagnosticSeverity::ERROR));
        assert_eq!(diag.source, Some("storybook".to_string()));
    }

    #[test]
    fn test_create_diagnostic_with_warning() {
        let text = "test";
        let diag = diagnostics::create_diagnostic(
            text,
            0,
            4,
            "Warning message".to_string(),
            DiagnosticSeverity::WARNING,
        );
        assert_eq!(diag.severity, Some(DiagnosticSeverity::WARNING));
    }

    // --- Lexer-based brace check (compute_diagnostics) -----------------------

    #[test]
    fn test_compute_diagnostics_valid() {
        let text = "character Alice { age: 7 }";
        let diags = diagnostics::compute_diagnostics(text);
        // Valid code should produce no diagnostics from the placeholder implementation
        // (The real diagnostics come from document.rs)
        // Just verify the function runs without panicking - result may vary
        let _ = diags;
    }

    #[test]
    fn test_compute_diagnostics_unclosed_brace() {
        let text = "} extra closing brace";
        let diags = diagnostics::compute_diagnostics(text);
        // Should detect the unexpected closing brace
        assert!(!diags.is_empty());
        assert!(diags[0].message.contains("closing brace"));
    }

    #[test]
    fn test_braces_in_strings_are_ignored() {
        // This was the bug: braces inside string literals were being counted
        let text = r#"character Alice {
description: "A person with { and } in their bio"
age: 7
}"#;
        let diags = diagnostics::compute_diagnostics(text);
        // Should NOT report any errors - braces in strings should be ignored
        assert!(
            diags.is_empty(),
            "Braces in strings should not trigger errors"
        );
    }

    #[test]
    fn test_actual_unmatched_brace_detected() {
        // But actual unmatched braces should still be detected
        let text = r#"character Alice {
age: 7
}
}"#; // Extra closing brace
        let diags = diagnostics::compute_diagnostics(text);
        assert!(
            !diags.is_empty(),
            "Actual unmatched braces should be detected"
        );
        assert!(diags[0].message.contains("closing brace"));
    }

    #[test]
    fn test_braces_in_comments_are_ignored() {
        // Comments should also be ignored
        let text = r#"character Alice {
age: 7 // This has { and } in comment
}"#;
        let diags = diagnostics::compute_diagnostics(text);
        assert!(
            diags.is_empty(),
            "Braces in comments should not trigger errors"
        );
    }
}

419
src/lsp/document.rs Normal file
View File

@@ -0,0 +1,419 @@
//! Document state management for LSP
//!
//! This module manages parsed document state including AST, position tracking,
//! and symbol tables for efficient LSP operations.
use crate::{
position::PositionTracker,
resolve::{
names::NameTable,
validate,
ErrorCollector,
ResolveError,
},
syntax::{
ast::File,
lexer::Lexer,
FileParser,
},
};
/// Parsed document with all necessary state for LSP operations
///
/// All fields are rebuilt together by `Document::new`/`Document::update`,
/// so they are always consistent with `text`.
pub struct Document {
    /// The source text
    pub text: String,
    /// Position tracker for offset -> line/col conversion
    pub positions: PositionTracker,
    /// Parsed AST (if parsing succeeded)
    pub ast: Option<File>,
    /// Parse errors (syntax-level; empty when `ast` is `Some`)
    pub parse_errors: Vec<ParseError>,
    /// Semantic validation errors from resolver
    pub resolve_errors: Vec<ResolveError>,
    /// Name table from the language's semantic resolution
    pub name_table: NameTable,
}
/// A syntax error located by byte offsets into the document text.
#[derive(Debug, Clone)]
pub struct ParseError {
    /// Byte offset where the error span starts.
    pub start: usize,
    /// Byte offset where the error span ends.
    pub end: usize,
    /// Human-readable description of the problem.
    pub message: String,
    /// How severe the problem is (drives LSP diagnostic severity).
    pub severity: ErrorSeverity,
}
/// Severity level attached to a [`ParseError`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorSeverity {
    /// A hard error: the document could not be parsed/validated.
    Error,
    /// A non-fatal issue worth surfacing to the user.
    Warning,
}
impl Document {
/// Create a new document from source text
pub fn new(text: String) -> Self {
let positions = PositionTracker::new(&text);
// Parse the document
let (ast, parse_errors) = Self::parse(&text);
// Build name table and run semantic validation
let (name_table, resolve_errors) = if let Some(ref ast) = ast {
// Use language's NameTable for semantic resolution
let mut resolve_errors = Vec::new();
let name_table = match NameTable::from_file(ast) {
| Ok(table) => table,
| Err(e) => {
// Capture NameTable errors (e.g., duplicate definitions)
resolve_errors.push(e);
NameTable::new()
},
};
// Also run validation
resolve_errors.extend(Self::validate(ast));
(name_table, resolve_errors)
} else {
(NameTable::new(), Vec::new())
};
Self {
text,
positions,
ast,
parse_errors,
resolve_errors,
name_table,
}
}
/// Update the document with new text
pub fn update(&mut self, text: String) {
self.text = text;
self.positions = PositionTracker::new(&self.text);
let (ast, parse_errors) = Self::parse(&self.text);
self.ast = ast;
self.parse_errors = parse_errors;
let (name_table, resolve_errors) = if let Some(ref ast) = self.ast {
// Use language's NameTable for semantic resolution
let mut resolve_errors = Vec::new();
let name_table = match NameTable::from_file(ast) {
| Ok(table) => table,
| Err(e) => {
// Capture NameTable errors (e.g., duplicate definitions)
resolve_errors.push(e);
NameTable::new()
},
};
// Also run validation
resolve_errors.extend(Self::validate(ast));
(name_table, resolve_errors)
} else {
(NameTable::new(), Vec::new())
};
self.name_table = name_table;
self.resolve_errors = resolve_errors;
}
/// Run semantic validation on the AST
fn validate(ast: &File) -> Vec<ResolveError> {
let mut collector = ErrorCollector::default();
// Validate each declaration type
for decl in &ast.declarations {
use crate::syntax::ast::Declaration;
match decl {
| Declaration::Character(c) => {
validate::validate_no_reserved_keywords(&c.fields, &mut collector);
validate::validate_trait_ranges(&c.fields, &mut collector);
},
| Declaration::Template(t) => {
validate::validate_no_reserved_keywords(&t.fields, &mut collector);
},
| Declaration::LifeArc(l) => {
validate::validate_life_arc_transitions(l, &mut collector);
},
| Declaration::Schedule(s) => {
validate::validate_schedule_overlaps(s, &mut collector);
},
| Declaration::Behavior(_b) => {
// Behavior validation requires action registry, skip for
// now TODO: Add action registry support
},
| Declaration::Relationship(_r) => {
// Relationship bond validation happens at a different level
// (requires multiple relationships), skip for now
},
| _ => {},
}
}
// Extract errors from collector
// Since ErrorCollector doesn't expose errors directly, we use into_result and
// extract
match collector.into_result(()) {
| Ok(_) => Vec::new(),
| Err(ResolveError::MultipleErrors { errors, .. }) => errors,
| Err(e) => vec![e],
}
}
/// Lex text up to a position and return tokens with their spans
/// Returns Vec<(start, token, end)>
fn lex_until(text: &str, until: usize) -> Vec<(usize, crate::syntax::lexer::Token, usize)> {
let lexer = Lexer::new(&text[..until.min(text.len())]);
lexer.collect()
}
/// Parse the source text
fn parse(text: &str) -> (Option<File>, Vec<ParseError>) {
let lexer = Lexer::new(text);
let parser = FileParser::new();
match parser.parse(lexer) {
| Ok(file) => (Some(file), Vec::new()),
| Err(e) => {
use lalrpop_util::ParseError as LalrpopError;
let (start, end, message) = match e {
| LalrpopError::InvalidToken { location } => {
(location, location + 1, "Invalid token".to_string())
},
| LalrpopError::UnrecognizedEof { location, expected } => {
let expected_str = if expected.is_empty() {
String::new()
} else if expected.len() == 1 {
format!(" (expected {})", expected[0])
} else {
format!(" (expected one of: {})", expected.join(", "))
};
(
location,
location,
format!("Unexpected end of file{}", expected_str),
)
},
| LalrpopError::UnrecognizedToken {
token: (start, tok, end),
expected,
} => {
use crate::syntax::lexer::Token;
// Lex tokens up to the error position
let tokens = Self::lex_until(text, start);
// Check what we expected vs what we got
let expecting_close_brace = expected.iter().any(|e| e.contains("}"));
let expecting_comma = expected.iter().any(|e| e.contains(","));
let expecting_colon = expected.iter().any(|e| e.contains(":"));
let is_declaration_keyword = matches!(
tok,
Token::Character |
Token::Template |
Token::LifeArc |
Token::Schedule |
Token::Behavior |
Token::Institution |
Token::Relationship |
Token::Location |
Token::Species |
Token::Enum
);
let is_identifier = matches!(tok, Token::Ident(_));
// Case 1: Missing closing brace before new declaration
if expecting_close_brace && is_declaration_keyword {
// Find the last non-comment identifier token and the declaration
// keyword
let mut last_ident_name = None;
let mut last_ident_span = None;
let mut decl_keyword = None;
for (tok_start, token, tok_end) in tokens.iter().rev() {
match token {
| Token::Ident(name) if last_ident_name.is_none() => {
last_ident_name = Some(name.clone());
last_ident_span = Some((*tok_start, *tok_end));
},
| Token::Character |
Token::Template |
Token::LifeArc |
Token::Schedule |
Token::Behavior |
Token::Institution |
Token::Relationship |
Token::Location |
Token::Species |
Token::Enum => {
decl_keyword = Some(token);
break;
},
| _ => {},
}
}
if let (Some((ident_start, ident_end)), Some(keyword)) =
(last_ident_span, decl_keyword)
{
let decl_type = match keyword {
| Token::Character => "character",
| Token::Template => "template",
| Token::LifeArc => "life_arc",
| Token::Schedule => "schedule",
| Token::Behavior => "behavior",
| Token::Institution => "institution",
| Token::Relationship => "relationship",
| Token::Location => "location",
| Token::Species => "species",
| Token::Enum => "enum",
| _ => "declaration",
};
let decl_name =
last_ident_name.unwrap_or_else(|| "unknown".to_string());
let message = format!(
"Missing closing brace '}}' for {} {}",
decl_type, decl_name
);
(ident_start, ident_end, message)
} else {
(start, end, "Missing closing brace '}'".to_string())
}
}
// Case 2: Missing comma or colon before identifier
else if (expecting_comma || expecting_colon) && is_identifier {
// Find the last identifier token before the error
if let Some((tok_start, Token::Ident(name), tok_end)) = tokens
.iter()
.rev()
.find(|(_, t, _)| matches!(t, Token::Ident(_)))
{
let message = if expecting_comma {
format!("Missing comma after '{}'", name)
} else {
format!("Missing ':' after '{}'", name)
};
(*tok_start, *tok_end, message)
} else {
let message = if expecting_comma {
"Missing comma".to_string()
} else {
"Missing ':'".to_string()
};
(start, end, message)
}
}
// Case 3: Other errors
else {
let message = if expected.len() == 1 {
let expected_token = &expected[0];
if expected_token.contains(";") {
"Missing semicolon".to_string()
} else if expected_token.contains("}") {
"Missing closing brace '}'".to_string()
} else {
format!("Expected {}, found {:?}", expected_token, tok)
}
} else if !expected.is_empty() {
format!("Expected one of: {}, found {:?}", expected.join(", "), tok)
} else {
format!("Unexpected token {:?}", tok)
};
(start, end, message)
}
},
| LalrpopError::ExtraToken {
token: (start, tok, end),
} => (start, end, format!("Extra token {:?}", tok)),
| LalrpopError::User { error } => {
// Our custom ParseError already has span information
// Extract it if it's UnexpectedToken or other variants
use crate::syntax::ParseError as CustomParseError;
match error {
| CustomParseError::UnexpectedToken { token, span } => (
span.offset(),
span.offset() + span.len(),
format!("Unexpected token: {}", token),
),
| CustomParseError::UnexpectedEof { span } => (
span.offset(),
span.offset() + span.len(),
"Unexpected end of file".to_string(),
),
| CustomParseError::InvalidToken { span } => (
span.offset(),
span.offset() + span.len(),
"Invalid token".to_string(),
),
| CustomParseError::UnclosedProseBlock { tag, span } => (
span.offset(),
span.offset() + span.len(),
format!("Unclosed prose block: ---{}", tag),
),
}
},
};
let error = ParseError {
start,
end,
message,
severity: ErrorSeverity::Error,
};
(None, vec![error])
},
}
}
/// Get the word at a byte offset
pub fn word_at_offset(&self, offset: usize) -> Option<String> {
if offset > self.text.len() {
return None;
}
let chars: Vec<char> = self.text.chars().collect();
if chars.is_empty() {
return None;
}
let mut byte_offset = 0;
let mut char_index = 0;
// Find the character index for this byte offset
for (i, ch) in chars.iter().enumerate() {
if byte_offset == offset {
char_index = i;
break;
}
byte_offset += ch.len_utf8();
if i == chars.len() - 1 {
// Last character
char_index = i;
}
}
// Check if we're on a word character
if char_index >= chars.len() || !is_word_char(chars[char_index]) {
return None;
}
// Find word boundaries
let mut start = char_index;
while start > 0 && is_word_char(chars[start - 1]) {
start -= 1;
}
let mut end = char_index + 1;
while end < chars.len() && is_word_char(chars[end]) {
end += 1;
}
Some(chars[start..end].iter().collect())
}
}
/// True for characters that may appear in an identifier-like "word":
/// the underscore plus any Unicode alphanumeric.
fn is_word_char(c: char) -> bool {
    c == '_' || c.is_alphanumeric()
}

View File

@@ -0,0 +1,191 @@
//! Edge case tests for document functionality
#[cfg(test)]
mod tests {
    use crate::lsp::document::Document;

    // --- word_at_offset boundary behavior ------------------------------------

    #[test]
    fn test_word_at_offset_unicode() {
        let doc = Document::new("character Café { age: 7 }".to_string());
        // Test finding "Café"
        let word = doc.word_at_offset(10);
        assert_eq!(word, Some("Café".to_string()));
    }

    #[test]
    fn test_word_at_offset_underscore() {
        let doc = Document::new("character snake_case { }".to_string());
        let word = doc.word_at_offset(12);
        assert_eq!(word, Some("snake_case".to_string()));
    }

    #[test]
    fn test_word_at_offset_at_start() {
        let doc = Document::new("character Alice { }".to_string());
        let word = doc.word_at_offset(0);
        assert_eq!(word, Some("character".to_string()));
    }

    #[test]
    fn test_word_at_offset_at_end() {
        let doc = Document::new("character Alice".to_string());
        let word = doc.word_at_offset(14);
        assert_eq!(word, Some("Alice".to_string()));
    }

    #[test]
    fn test_word_at_offset_out_of_bounds() {
        let doc = Document::new("test".to_string());
        let word = doc.word_at_offset(1000);
        assert_eq!(word, None);
    }

    #[test]
    fn test_word_at_offset_on_whitespace() {
        let doc = Document::new("character Alice".to_string());
        let word = doc.word_at_offset(9); // Space between character and Alice
        assert_eq!(word, None);
    }

    #[test]
    fn test_word_at_offset_on_punctuation() {
        let doc = Document::new("character Alice { }".to_string());
        let word = doc.word_at_offset(16); // On '{'
        assert_eq!(word, None);
    }

    // --- update() rebuilds all derived state ----------------------------------

    #[test]
    fn test_update_clears_old_symbols() {
        let mut doc = Document::new("character Alice {}".to_string());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        doc.update("character Bob {}".to_string());
        assert!(doc.name_table.resolve_name("Alice").is_none());
        assert!(doc.name_table.resolve_name("Bob").is_some());
    }

    #[test]
    fn test_update_with_invalid_syntax() {
        let mut doc = Document::new("character Alice {}".to_string());
        assert!(doc.ast.is_some());
        assert!(doc.parse_errors.is_empty());
        doc.update("invalid { }".to_string());
        assert!(doc.ast.is_none());
        assert!(!doc.parse_errors.is_empty());
    }

    #[test]
    fn test_empty_document_has_no_symbols() {
        let doc = Document::new("".to_string());
        assert_eq!(doc.name_table.all_entries().count(), 0);
    }

    #[test]
    fn test_symbol_table_with_duplicates() {
        let source = r#"
character Alice { age: 7 }
character Alice { age: 8 }
"#;
        let doc = Document::new(source.to_string());
        // Duplicate declarations should be caught during resolution
        // NameTable from_file will fail, so we'll have an empty table and
        // resolve_errors
        assert!(
            !doc.resolve_errors.is_empty(),
            "Should have validation error for duplicate"
        );
    }

    // --- Symbol extraction per declaration kind -------------------------------

    #[test]
    fn test_mixed_declaration_types() {
        let source = r#"
species Human {}
character Alice: Human {}
template Child {}
enum Mood { Happy, Sad }
location Home {}
relationship Friends { Alice as friend {} Bob as friend {} }
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Human").is_some());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        assert!(doc.name_table.resolve_name("Child").is_some());
        assert!(doc.name_table.resolve_name("Mood").is_some());
        assert!(doc.name_table.resolve_name("Home").is_some());
        assert!(doc.name_table.resolve_name("Friends").is_some());
    }

    #[test]
    fn test_life_arc_symbol_extraction() {
        let source = r#"
life_arc Growing {
state child {}
state teen {}
state adult {}
}
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Growing").is_some());
        let growing = doc.name_table.resolve_name("Growing").unwrap();
        assert_eq!(growing.kind, crate::resolve::names::DeclKind::LifeArc);
    }

    #[test]
    fn test_schedule_symbol_extraction() {
        let source = r#"
schedule Daily {
08:00 -> 09:00: breakfast {}
09:00 -> 12:00: work {}
}
"#;
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("Daily").is_some());
        let daily = doc.name_table.resolve_name("Daily").unwrap();
        assert_eq!(daily.kind, crate::resolve::names::DeclKind::Schedule);
    }

    #[test]
    fn test_institution_symbol_extraction() {
        let source = "institution School { type: education }";
        let doc = Document::new(source.to_string());
        assert!(doc.name_table.resolve_name("School").is_some());
        let school = doc.name_table.resolve_name("School").unwrap();
        assert_eq!(school.kind, crate::resolve::names::DeclKind::Institution);
    }

    #[test]
    fn test_very_long_identifier() {
        let long_name = "a".repeat(1000);
        let source = format!("character {} {{}}", long_name);
        let doc = Document::new(source);
        assert!(doc.name_table.resolve_name(&long_name).is_some());
    }

    #[test]
    fn test_multiline_document() {
        let source = "\n\n\n\ncharacter Alice {\n\n\n age: 7\n\n\n}";
        let doc = Document::new(source.to_string());
        assert!(doc.ast.is_some());
        assert!(doc.name_table.resolve_name("Alice").is_some());
    }
}

146
src/lsp/formatting.rs Normal file
View File

@@ -0,0 +1,146 @@
//! Document formatting provider
//!
//! Provides auto-formatting for Storybook files
use tower_lsp::lsp_types::{
FormattingOptions,
Position,
Range,
TextEdit,
};
use super::document::Document;
/// Format the entire document
///
/// Returns `None` when the document failed to parse (formatting broken
/// syntax could produce garbage) or when it is already formatted.
pub fn format_document(doc: &Document, _options: &FormattingOptions) -> Option<Vec<TextEdit>> {
    // Don't format if there are parse errors - the AST would be invalid.
    doc.ast.as_ref()?;

    // Current rules: 4-space indentation, normalized `key: value` spacing,
    // and collapsed blank runs between top-level declarations.
    let formatted = format_text(&doc.text);
    if formatted == doc.text {
        // Already canonical — signal "no edits" rather than an empty edit.
        return None;
    }

    // Emit one edit replacing the whole document.
    let start = Position {
        line: 0,
        character: 0,
    };
    let end = Position {
        line: doc.positions.line_count() as u32,
        character: 0,
    };
    Some(vec![TextEdit {
        range: Range { start, end },
        new_text: formatted,
    }])
}
/// Format the text according to Storybook style rules
///
/// Rules applied: 4-space indentation driven by brace nesting, normalized
/// `key: value` spacing, collapsed runs of blank lines, and verbatim
/// preservation of `---`-fenced prose blocks.
pub(crate) fn format_text(text: &str) -> String {
    let mut out = String::new();
    let mut depth: usize = 0;
    let mut just_emitted_blank = false;
    let mut inside_prose = false;

    for raw in text.lines() {
        let stripped = raw.trim();

        // Prose fences toggle verbatim mode; the fence line itself is indented.
        if stripped.starts_with("---") {
            inside_prose = !inside_prose;
            out.push_str(&"    ".repeat(depth));
            out.push_str(stripped);
            out.push('\n');
            continue;
        }
        if inside_prose {
            // Preserve prose content exactly as-is.
            out.push_str(raw);
            out.push('\n');
            continue;
        }

        // Collapse consecutive blank lines down to one.
        if stripped.is_empty() {
            if !just_emitted_blank {
                out.push('\n');
                just_emitted_blank = true;
            }
            continue;
        }
        just_emitted_blank = false;

        // A leading '}' closes a scope before this line is printed.
        if stripped.starts_with('}') {
            depth = depth.saturating_sub(1);
        }
        out.push_str(&"    ".repeat(depth));
        out.push_str(&format_line(stripped));
        out.push('\n');
        // A trailing '{' opens a scope for the following lines.
        if stripped.ends_with('{') {
            depth += 1;
        }
    }
    out
}

/// Format a single (already-trimmed) line: normalize `key: value` spacing.
/// Comments and `::` module paths are left untouched.
fn format_line(line: &str) -> String {
    match line.find(':') {
        Some(idx) if !line.starts_with("//") && !line.contains("::") => {
            format!("{}: {}", line[..idx].trim_end(), line[idx + 1..].trim_start())
        },
        _ => line.to_string(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: colon spacing is normalized and declarations survive.
    #[test]
    fn test_basic_formatting() {
        let input = "character Alice{age:25}";
        let formatted = format_text(input);
        // Check key formatting features
        assert!(
            formatted.contains("age: 25"),
            "Should have space after colon"
        );
        assert!(
            formatted.contains("character Alice"),
            "Should have character declaration"
        );
    }

    // Prose-block content must pass through the formatter untouched.
    #[test]
    fn test_preserve_prose() {
        let input = "---backstory\nSome irregular spacing\n---";
        let formatted = format_text(input);
        assert!(formatted.contains("Some irregular spacing"));
    }
}

167
src/lsp/formatting_tests.rs Normal file
View File

@@ -0,0 +1,167 @@
//! Tests for LSP document formatting functionality
//!
//! This module tests auto-formatting for:
//! - Spacing normalization around colons
//! - Indentation preservation
//! - Prose block preservation
//! - Edge cases (empty documents, already formatted)
use tower_lsp::lsp_types::FormattingOptions;
use super::{
document::Document,
formatting::{
format_document,
format_text,
},
};
// --- Colon spacing -----------------------------------------------------------

#[test]
fn test_format_adds_space_after_colon() {
    let input = "character Alice{age:7}";
    let formatted = format_text(input);
    // Main formatting: adds space after colon
    assert!(formatted.contains("age: 7"));
}

#[test]
fn test_format_normalizes_colon_spacing() {
    let input = "character Alice{age : 7}";
    let formatted = format_text(input);
    // Should normalize to single space after colon
    assert!(formatted.contains("age: 7"));
    assert!(!formatted.contains("age :"));
}

#[test]
fn test_format_multiple_fields() {
    let input = "character Alice{age:7\nname:\"Alice\"}";
    let formatted = format_text(input);
    assert!(formatted.contains("age: 7"));
    assert!(formatted.contains("name: \"Alice\""));
}

#[test]
fn test_format_preserves_module_paths() {
    let input = "use characters::Alice;";
    let formatted = format_text(input);
    // Should NOT add space in :: paths
    assert!(formatted.contains("characters::Alice"));
    assert!(!formatted.contains("characters: :Alice"));
}

// --- Prose blocks and indentation --------------------------------------------

#[test]
fn test_format_preserves_prose_blocks() {
    let input = r#"character Alice{
---backstory
Some irregular spacing here
---
age:7}"#;
    let formatted = format_text(input);
    // Prose content preserved exactly
    assert!(formatted.contains("Some irregular spacing here"));
    // But other fields still formatted
    assert!(formatted.contains("age: 7"));
}

#[test]
fn test_format_indentation_with_braces() {
    let input = "character Alice{\nage:7\n}";
    let formatted = format_text(input);
    // Check indentation (4 spaces)
    assert!(formatted.contains("character Alice{"));
    assert!(formatted.contains(" age: 7"));
    assert!(formatted.contains("}"));
}

#[test]
fn test_format_nested_indentation() {
    let input = "template Person{\nnested:{\nvalue:123\n}\n}";
    let formatted = format_text(input);
    assert!(formatted.contains(" nested: {"));
    assert!(formatted.contains(" value: 123"));
    assert!(formatted.contains(" }"));
}

// --- Degenerate and idempotent inputs -----------------------------------------

#[test]
fn test_format_empty_document() {
    let input = "";
    let formatted = format_text(input);
    assert_eq!(formatted, "");
}

#[test]
fn test_format_whitespace_only() {
    let input = " \n \n ";
    let formatted = format_text(input);
    // Should collapse to single newline or empty
    assert!(formatted.len() <= 2);
}

#[test]
fn test_format_already_formatted() {
    let input = "character Alice{\n age: 7\n}";
    let formatted = format_text(input);
    // Should remain essentially the same
    assert!(formatted.contains("age: 7"));
}

#[test]
fn test_format_removes_multiple_blank_lines() {
    let input = "character Alice{age:7}\n\n\n\ncharacter Bob{age:10}";
    let formatted = format_text(input);
    // Should consolidate multiple blank lines
    let double_newlines = formatted.matches("\n\n\n").count();
    assert_eq!(double_newlines, 0, "Should not have triple newlines");
}

#[test]
fn test_format_comments_unchanged() {
    let input = "// Comment\ncharacter Alice{age:7}";
    let formatted = format_text(input);
    assert!(formatted.contains("// Comment"));
}
#[test]
fn test_format_document_returns_text_edit() {
let input = "character Alice{age:7}";
let doc = Document::new(input.to_string());
let result = format_document(&doc, &FormattingOptions::default());
if doc.ast.is_some() {
assert!(
result.is_some(),
"Should return TextEdit for valid document"
);
}
}
#[test]
fn test_format_document_no_changes_returns_none() {
let input = "character Alice{\n age: 7\n}\n";
let doc = Document::new(input.to_string());
let result = format_document(&doc, &FormattingOptions::default());
// If document is already formatted, may return None
// (depends on exact formatting match)
if result.is_none() {
assert!(
doc.ast.is_some(),
"Should only return None if formatting matches"
);
}
}

636
src/lsp/hover.rs Normal file
View File

@@ -0,0 +1,636 @@
//! Hover information provider
use tower_lsp::lsp_types::{
Hover,
HoverContents,
MarkupContent,
MarkupKind,
};
use crate::{
lsp::document::Document,
resolve::names::DeclKind,
syntax::{
ast::{
Declaration,
Value,
},
lexer::{
Lexer,
Token,
},
},
};
/// Get hover information at a position.
///
/// Translates the LSP (line, character) position into an absolute byte
/// offset, finds the lexer token covering that offset, and returns the
/// static keyword documentation for it (see `get_token_documentation`).
///
/// Returns `None` when the position is past the end of its line, the
/// line does not exist, no token covers the offset (e.g. whitespace),
/// or the token is not a documented keyword.
///
/// NOTE(review): the running offset adds `line_text.len() + 1`, which
/// assumes a one-byte "\n" line terminator — documents with "\r\n"
/// endings would skew the offset; confirm text is normalized upstream.
pub fn get_hover_info(text: &str, line: usize, character: usize) -> Option<Hover> {
    // Calculate absolute byte offset from line/character position
    let mut byte_offset = 0;
    let mut found_line = false;
    for (current_line, line_text) in text.lines().enumerate() {
        if current_line == line {
            found_line = true;
            // Check if character position is beyond the line
            let line_char_count = line_text.chars().count();
            if character >= line_char_count {
                return None;
            }
            // Add the character offset (assuming UTF-8)
            // Walk char boundaries so multi-byte characters map to the
            // correct byte position (not simply `byte_offset + character`).
            let mut char_count = 0;
            for (byte_pos, _) in line_text.char_indices() {
                if char_count == character {
                    byte_offset += byte_pos;
                    break;
                }
                char_count += 1;
            }
            break;
        }
        // Lines before the target contribute their full byte length.
        byte_offset += line_text.len() + 1; // +1 for newline
    }
    // If line was not found, return None
    if !found_line {
        return None;
    }
    // Tokenize and find the token at the cursor position
    let lexer = Lexer::new(text);
    let mut target_token = None;
    for (offset, token, end) in lexer {
        // Half-open span test: the token covers [offset, end).
        if offset <= byte_offset && byte_offset < end {
            target_token = Some(token);
            break;
        }
    }
    let token = target_token?;
    // Generate hover info based on the token
    let content = get_token_documentation(&token)?;
    Some(Hover {
        contents: HoverContents::Markup(MarkupContent {
            kind: MarkupKind::Markdown,
            value: content.to_string(),
        }),
        range: None,
    })
}
/// Static hover documentation for a keyword token.
///
/// Returns `None` for tokens (identifiers, literals, punctuation) that
/// carry no hover documentation.
fn get_token_documentation(token: &Token) -> Option<&'static str> {
    let doc = match token {
        // Top-level declaration keywords.
        Token::Character => "**character** - Defines a character entity\n\nSyntax: `character Name { ... }`",
        Token::Template => "**template** - Defines a reusable field template\n\nSyntax: `template Name { ... }`",
        Token::LifeArc => "**life_arc** - Defines a state machine for character development\n\nSyntax: `life_arc Name { ... }`",
        Token::Schedule => "**schedule** - Defines a daily schedule or routine\n\nSyntax: `schedule Name { ... }`",
        Token::Behavior => "**behavior** - Defines a behavior tree for AI\n\nSyntax: `behavior Name { ... }`",
        Token::Institution => "**institution** - Defines an organization or group\n\nSyntax: `institution Name { ... }`",
        Token::Relationship => "**relationship** - Defines a multi-party relationship\n\nSyntax: `relationship Name { ... }`",
        Token::Location => "**location** - Defines a place or setting\n\nSyntax: `location Name { ... }`",
        Token::Species => "**species** - Defines a species with templates\n\nSyntax: `species Name { ... }`",
        Token::Enum => "**enum** - Defines an enumeration type\n\nSyntax: `enum Name { ... }`",
        // Import and composition keywords.
        Token::Use => "**use** - Imports declarations from other files\n\nSyntax: `use path::to::item;`",
        Token::From => "**from** - Applies templates to a character\n\nSyntax: `character Name from Template { ... }`",
        Token::Include => "**include** - Includes another template\n\nSyntax: `include TemplateName`",
        // Life-arc body keywords.
        Token::State => "**state** - Defines a state in a life arc\n\nSyntax: `state name { ... }`",
        Token::On => "**on** - Defines a transition or enter handler\n\nSyntax: `on condition -> target` or `on enter { ... }`",
        // Modifiers.
        Token::Strict => "**strict** - Enforces that a template only accepts defined fields",
        // Anything else is undocumented.
        _ => return None,
    };
    Some(doc)
}
/// Get semantic hover information for symbols.
///
/// Like `get_hover_info`, but for identifiers: the cursor position is
/// mapped to a byte offset, the identifier token under it is resolved
/// through the document's name table, and the matching declaration in
/// the AST is rendered as a markdown hover.
///
/// Returns `None` when there is no AST, the position is invalid, the
/// token under the cursor is not an identifier, the name is unknown to
/// the name table, or no top-level declaration carries that name.
///
/// NOTE(review): shares the `\n`-only line-terminator assumption with
/// `get_hover_info` (`line_text.len() + 1`); "\r\n" input would skew
/// the offset — confirm upstream normalization.
pub fn get_semantic_hover_info(doc: &Document, line: usize, character: usize) -> Option<Hover> {
    let ast = doc.ast.as_ref()?;
    // Calculate absolute byte offset from line/character position
    let mut byte_offset = 0;
    let mut found_line = false;
    for (current_line, line_text) in doc.text.lines().enumerate() {
        if current_line == line {
            found_line = true;
            // Check if character position is beyond the line
            let line_char_count = line_text.chars().count();
            if character >= line_char_count {
                return None;
            }
            // Walk char boundaries so multi-byte characters map to the
            // correct byte position within the line.
            let mut char_count = 0;
            for (byte_pos, _) in line_text.char_indices() {
                if char_count == character {
                    byte_offset += byte_pos;
                    break;
                }
                char_count += 1;
            }
            break;
        }
        // Lines before the target contribute their full byte length.
        byte_offset += line_text.len() + 1; // +1 for newline
    }
    if !found_line {
        return None;
    }
    // Tokenize and find the identifier at the cursor position
    let lexer = Lexer::new(&doc.text);
    let mut target_ident = None;
    for (offset, token, end) in lexer {
        if offset <= byte_offset && byte_offset < end {
            // Only identifier tokens produce semantic hovers; any other
            // token under the cursor falls through to None.
            if let Token::Ident(name) = token {
                target_ident = Some(name);
            }
            break;
        }
    }
    let word = target_ident?;
    // Look up the symbol in the name table
    let symbol_info = doc.name_table.lookup(&[word.clone()])?;
    // Find the declaration in the AST
    for decl in &ast.declarations {
        let decl_name = get_declaration_name(decl);
        if decl_name.as_deref() == Some(word.as_str()) {
            return Some(format_declaration_hover(decl, &symbol_info.kind));
        }
    }
    None
}
/// Extract the declared name, if any, from a declaration.
///
/// `use` declarations carry a path rather than a single name, so they
/// yield `None`.
fn get_declaration_name(decl: &Declaration) -> Option<String> {
    let name = match decl {
        | Declaration::Character(c) => &c.name,
        | Declaration::Template(t) => &t.name,
        | Declaration::Species(s) => &s.name,
        | Declaration::Enum(e) => &e.name,
        | Declaration::Location(l) => &l.name,
        | Declaration::Institution(i) => &i.name,
        | Declaration::Relationship(r) => &r.name,
        | Declaration::LifeArc(la) => &la.name,
        | Declaration::Schedule(s) => &s.name,
        | Declaration::Behavior(b) => &b.name,
        | Declaration::Use(_) => return None,
    };
    Some(name.clone())
}
/// Format hover information for a declaration
fn format_declaration_hover(decl: &Declaration, _kind: &DeclKind) -> Hover {
let content = match decl {
| Declaration::Character(c) => format_character_hover(c),
| Declaration::Template(t) => format_template_hover(t),
| Declaration::Species(s) => format_species_hover(s),
| Declaration::Enum(e) => format_enum_hover(e),
| Declaration::Location(l) => format_location_hover(l),
| Declaration::Institution(i) => format_institution_hover(i),
| Declaration::Relationship(r) => format_relationship_hover(r),
| Declaration::LifeArc(la) => format_life_arc_hover(la),
| Declaration::Schedule(s) => format_schedule_hover(s),
| Declaration::Behavior(b) => format_behavior_hover(b),
| Declaration::Use(_) => "**use** declaration".to_string(),
};
Hover {
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: content,
}),
range: None,
}
}
/// Format character hover information
fn format_character_hover(c: &crate::syntax::ast::Character) -> String {
let mut content = format!("**character** `{}`\n\n", c.name);
// Species
if let Some(ref species) = c.species {
content.push_str(&format!("**Species:** `{}`\n\n", species));
}
// Templates
if let Some(ref templates) = c.template {
content.push_str(&format!(
"**Templates:** {}\n\n",
templates
.iter()
.map(|t| format!("`{}`", t))
.collect::<Vec<_>>()
.join(", ")
));
}
// Fields
if !c.fields.is_empty() {
content.push_str("**Fields:**\n");
for field in &c.fields {
let value_preview = format_value_preview(&field.value);
content.push_str(&format!("- `{}`: {}\n", field.name, value_preview));
}
content.push('\n');
}
// Prose blocks count
let prose_count = c
.fields
.iter()
.filter(|f| matches!(f.value, Value::ProseBlock(_)))
.count();
if prose_count > 0 {
content.push_str(&format!("*{} prose block(s)*\n", prose_count));
}
content
}
/// Render markdown hover text for a `template` declaration.
fn format_template_hover(t: &crate::syntax::ast::Template) -> String {
    let mut out = format!("**template** `{}`\n\n", t.name);
    if t.strict {
        out.push_str("*strict mode*\n\n");
    }

    // Included templates, comma-separated as inline code.
    if !t.includes.is_empty() {
        let list: Vec<String> = t.includes.iter().map(|i| format!("`{}`", i)).collect();
        out.push_str(&format!("**Includes:** {}\n\n", list.join(", ")));
    }

    // One bullet per field, rendering the declared value as a type name.
    if !t.fields.is_empty() {
        out.push_str("**Fields:**\n");
        for field in &t.fields {
            out.push_str(&format!(
                "- `{}`: {}\n",
                field.name,
                format_value_as_type(&field.value)
            ));
        }
        out.push('\n');
    }
    out
}
/// Render markdown hover text for a `species` declaration.
fn format_species_hover(s: &crate::syntax::ast::Species) -> String {
    let mut out = format!("**species** `{}`\n\n", s.name);

    // Included templates, comma-separated as inline code.
    if !s.includes.is_empty() {
        let list: Vec<String> = s.includes.iter().map(|i| format!("`{}`", i)).collect();
        out.push_str(&format!("**Includes:** {}\n\n", list.join(", ")));
    }

    // One bullet per field, rendering the declared value as a type name.
    if !s.fields.is_empty() {
        out.push_str("**Fields:**\n");
        for field in &s.fields {
            out.push_str(&format!(
                "- `{}`: {}\n",
                field.name,
                format_value_as_type(&field.value)
            ));
        }
        out.push('\n');
    }
    out
}
/// Render markdown hover text for an `enum` declaration.
fn format_enum_hover(e: &crate::syntax::ast::EnumDecl) -> String {
    let mut out = format!("**enum** `{}`\n\n", e.name);
    if !e.variants.is_empty() {
        out.push_str("**Variants:**\n");
        // One bullet per variant, rendered as inline code.
        let bullets: String = e
            .variants
            .iter()
            .map(|variant| format!("- `{}`\n", variant))
            .collect();
        out.push_str(&bullets);
        out.push('\n');
    }
    out
}
/// Render markdown hover text for a `location` declaration.
fn format_location_hover(l: &crate::syntax::ast::Location) -> String {
    let mut out = format!("**location** `{}`\n\n", l.name);
    // Without fields there is nothing more to show.
    if l.fields.is_empty() {
        return out;
    }
    out.push_str("**Properties:**\n");
    for field in &l.fields {
        out.push_str(&format!(
            "- `{}`: {}\n",
            field.name,
            format_value_preview(&field.value)
        ));
    }
    out.push('\n');
    out
}
/// Render markdown hover text for an `institution` declaration.
fn format_institution_hover(i: &crate::syntax::ast::Institution) -> String {
    let mut out = format!("**institution** `{}`\n\n", i.name);
    // Without fields there is nothing more to show.
    if i.fields.is_empty() {
        return out;
    }
    out.push_str("**Properties:**\n");
    for field in &i.fields {
        out.push_str(&format!(
            "- `{}`: {}\n",
            field.name,
            format_value_preview(&field.value)
        ));
    }
    out.push('\n');
    out
}
/// Render markdown hover text for a `relationship` declaration.
fn format_relationship_hover(r: &crate::syntax::ast::Relationship) -> String {
    let mut out = format!("**relationship** `{}`\n\n", r.name);

    // Participants, each optionally annotated with its role.
    if !r.participants.is_empty() {
        let rendered: Vec<String> = r
            .participants
            .iter()
            .map(|p| {
                let name = p.name.join(".");
                match p.role.as_ref() {
                    Some(role) => format!("`{}` as {}", name, role),
                    None => format!("`{}`", name),
                }
            })
            .collect();
        out.push_str(&format!("**Participants:** {}\n\n", rendered.join(", ")));
    }

    // One bullet per field with a short value preview.
    if !r.fields.is_empty() {
        out.push_str("**Fields:**\n");
        for field in &r.fields {
            out.push_str(&format!(
                "- `{}`: {}\n",
                field.name,
                format_value_preview(&field.value)
            ));
        }
        out.push('\n');
    }
    out
}
/// Render markdown hover text for a `life_arc` declaration.
///
/// Shows the state count plus a preview of the first five states.
fn format_life_arc_hover(la: &crate::syntax::ast::LifeArc) -> String {
    const PREVIEW_COUNT: usize = 5;
    let mut out = format!("**life_arc** `{}`\n\n", la.name);
    if la.states.is_empty() {
        return out;
    }
    out.push_str(&format!("**States:** {} states\n\n", la.states.len()));
    for state in la.states.iter().take(PREVIEW_COUNT) {
        out.push_str(&format!(
            "- `{}` ({} transitions)\n",
            state.name,
            state.transitions.len()
        ));
    }
    // Elide anything past the preview window.
    if la.states.len() > PREVIEW_COUNT {
        out.push_str(&format!(
            "- *... and {} more*\n",
            la.states.len() - PREVIEW_COUNT
        ));
    }
    out.push('\n');
    out
}
/// Render markdown hover text for a `schedule` declaration.
///
/// Shows the entry count plus a preview of the first five time blocks.
fn format_schedule_hover(s: &crate::syntax::ast::Schedule) -> String {
    const PREVIEW_COUNT: usize = 5;
    let mut out = format!("**schedule** `{}`\n\n", s.name);
    if s.blocks.is_empty() {
        return out;
    }
    out.push_str(&format!("**Time Blocks:** {} entries\n\n", s.blocks.len()));
    for block in s.blocks.iter().take(PREVIEW_COUNT) {
        out.push_str(&format!(
            "- {} - {}: {}\n",
            format_time(&block.start),
            format_time(&block.end),
            block.activity
        ));
    }
    // Elide anything past the preview window.
    if s.blocks.len() > PREVIEW_COUNT {
        out.push_str(&format!(
            "- *... and {} more*\n",
            s.blocks.len() - PREVIEW_COUNT
        ));
    }
    out.push('\n');
    out
}
/// Render markdown hover text for a `behavior` declaration, including a
/// shallow preview of its behavior tree.
fn format_behavior_hover(b: &crate::syntax::ast::Behavior) -> String {
    let tree = format_behavior_node_preview(&b.root, 0);
    format!("**behavior** `{}`\n\n**Behavior Tree:**\n{}\n", b.name, tree)
}
/// Format a behavior tree node preview (recursively, up to depth 2).
///
/// Depths 0..=2 are rendered; anything deeper is elided with `*...*`.
/// Composite nodes (Sequence/Selector) preview at most three children.
fn format_behavior_node_preview(node: &crate::syntax::ast::BehaviorNode, depth: usize) -> String {
    /// Shared child renderer for the two composite node kinds:
    /// previews up to three children, then an "... and N more" marker.
    fn preview_children(
        children: &[crate::syntax::ast::BehaviorNode],
        depth: usize,
        indent: &str,
        content: &mut String,
    ) {
        for child in children.iter().take(3) {
            content.push_str(&format_behavior_node_preview(child, depth + 1));
        }
        if children.len() > 3 {
            content.push_str(&format!(
                "{}  *... and {} more*\n",
                indent,
                children.len() - 3
            ));
        }
    }

    // Depth cutoff: elide everything below depth 2.
    if depth > 2 {
        return format!("{} *...*\n", " ".repeat(depth));
    }
    let indent = " ".repeat(depth);
    let mut content = String::new();
    match node {
        | crate::syntax::ast::BehaviorNode::Action(name, params) => {
            content.push_str(&format!("{}- Action: `{}`", indent, name));
            if !params.is_empty() {
                content.push_str(&format!(" ({} params)", params.len()));
            }
            content.push('\n');
        },
        | crate::syntax::ast::BehaviorNode::Sequence { children, .. } => {
            content.push_str(&format!(
                "{}- Sequence ({} children)\n",
                indent,
                children.len()
            ));
            preview_children(children, depth, &indent, &mut content);
        },
        | crate::syntax::ast::BehaviorNode::Selector { children, .. } => {
            content.push_str(&format!(
                "{}- Selector ({} children)\n",
                indent,
                children.len()
            ));
            preview_children(children, depth, &indent, &mut content);
        },
        | crate::syntax::ast::BehaviorNode::Condition(_) => {
            content.push_str(&format!("{}- Condition\n", indent));
        },
        | crate::syntax::ast::BehaviorNode::Decorator {
            decorator_type,
            child,
            ..
        } => {
            // Debug-formatted decorator kind, then its single child.
            content.push_str(&format!("{}- Decorator: `{:?}`\n", indent, decorator_type));
            content.push_str(&format_behavior_node_preview(child, depth + 1));
        },
        | crate::syntax::ast::BehaviorNode::SubTree(name) => {
            content.push_str(&format!("{}- SubTree: `{}`\n", indent, name.join(".")));
        },
    }
    content
}
/// Map a field value to a display type name (for template/species fields).
fn format_value_as_type(value: &Value) -> String {
    match value {
        // Scalar literals map to fixed type names.
        | Value::String(_) => "String".to_string(),
        | Value::Int(_) => "Int".to_string(),
        | Value::Float(_) => "Float".to_string(),
        | Value::Bool(_) => "Bool".to_string(),
        | Value::Time(_) => "Time".to_string(),
        | Value::Duration(_) => "Duration".to_string(),
        | Value::Object(_) => "Object".to_string(),
        | Value::ProseBlock(_) => "ProseBlock".to_string(),
        | Value::Override(_) => "Override".to_string(),
        // Identifier paths render as a dotted name.
        | Value::Identifier(path) => path.join("."),
        // Element type taken from the first item; an empty list is "List".
        | Value::List(items) => match items.first() {
            Some(first) => format!("[{}]", format_value_as_type(first)),
            None => "List".to_string(),
        },
        | Value::Range(lo, hi) => {
            format!(
                "{}..{}",
                format_value_as_type(lo),
                format_value_as_type(hi)
            )
        },
    }
}
/// Produce a short inline preview of a value (for character/location fields).
fn format_value_preview(value: &Value) -> String {
    match value {
        | Value::Int(n) => n.to_string(),
        | Value::Float(f) => f.to_string(),
        | Value::Bool(b) => b.to_string(),
        | Value::Identifier(path) => format!("`{}`", path.join(".")),
        // Long strings are cut to 50 chars so hovers stay compact.
        | Value::String(s) => format!("\"{}\"", truncate(s, 50)),
        // Collections preview as a size, not their contents.
        | Value::List(items) => match items.len() {
            0 => "[]".to_string(),
            n => format!("[{} items]", n),
        },
        | Value::Object(fields) => format!("{{{} fields}}", fields.len()),
        | Value::Range(lo, hi) => {
            format!(
                "{}..{}",
                format_value_preview(lo),
                format_value_preview(hi)
            )
        },
        | Value::Time(time) => format_time(time),
        | Value::Duration(duration) => format_duration(duration),
        | Value::ProseBlock(prose) => format!("*prose ({} chars)*", prose.content.len()),
        | Value::Override(ov) => format!("*{} overrides*", ov.overrides.len()),
    }
}
/// Format a time as `HH:MM`, appending `:SS` only when seconds are non-zero.
fn format_time(time: &crate::syntax::ast::Time) -> String {
    match time.second {
        0 => format!("{:02}:{:02}", time.hour, time.minute),
        s => format!("{:02}:{:02}:{:02}", time.hour, time.minute, s),
    }
}
/// Format a duration as space-separated `Nh Nm Ns`, omitting zero parts.
///
/// An all-zero duration renders as "0s".
fn format_duration(duration: &crate::syntax::ast::Duration) -> String {
    let components = [
        (duration.hours, "h"),
        (duration.minutes, "m"),
        (duration.seconds, "s"),
    ];
    let parts: Vec<String> = components
        .iter()
        .filter(|(amount, _)| *amount > 0)
        .map(|(amount, unit)| format!("{}{}", amount, unit))
        .collect();
    if parts.is_empty() {
        "0s".to_string()
    } else {
        parts.join(" ")
    }
}
/// Truncate a string to a maximum length (in characters), appending
/// "..." when text is cut.
///
/// Works on character boundaries, so multi-byte UTF-8 input (e.g. prose
/// previews) can never hit a byte-slice boundary panic; `saturating_sub`
/// guards against underflow when `max_len < 3`.
fn truncate(s: &str, max_len: usize) -> String {
    if s.chars().count() <= max_len {
        return s.to_string();
    }
    // Reserve room for the ellipsis.
    let keep = max_len.saturating_sub(3);
    let head: String = s.chars().take(keep).collect();
    format!("{}...", head)
}

350
src/lsp/hover_tests.rs Normal file
View File

@@ -0,0 +1,350 @@
//! Tests for LSP hover functionality
//!
//! This module tests hover information display for:
//! - Keywords (character, template, behavior, etc.)
//! - Type information for symbols
//! - Documentation display
//! - Edge cases (whitespace, EOF, invalid positions)
use tower_lsp::lsp_types::{
HoverContents,
MarkupContent,
MarkupKind,
};
use super::hover::get_hover_info;
// --- Keyword hover tests: each declaration keyword yields markdown docs ---

#[test]
fn test_hover_on_character_keyword() {
    let source = "character Alice { age: 7 }";
    // Hover over "character" keyword at position 0,5 (middle of "character")
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { kind, value }) => {
            assert_eq!(kind, MarkupKind::Markdown);
            assert!(value.contains("character"));
            assert!(value.contains("Defines a character entity"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_template_keyword() {
    let source = "template Person { name: String }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("template"));
            assert!(value.contains("reusable field template"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_behavior_keyword() {
    let source = "behavior WalkAround { walk_to_garden }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("behavior"));
            assert!(value.contains("behavior tree"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_life_arc_keyword() {
    let source = "life_arc Growth { state child { } }";
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("life_arc"));
            assert!(value.contains("state machine"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_schedule_keyword() {
    let source = "schedule Daily { 08:00-12:00 work }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("schedule"));
            assert!(value.contains("daily schedule"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_institution_keyword() {
    let source = "institution Bakery { location: wonderland }";
    let hover = get_hover_info(source, 0, 6);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("institution"));
            assert!(value.contains("organization"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_relationship_keyword() {
    let source = "relationship Friendship { participants: [Alice, Bob] }";
    let hover = get_hover_info(source, 0, 8);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("relationship"));
            assert!(value.contains("multi-party"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_location_keyword() {
    let source = "location Garden { description: \"A beautiful garden\" }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("location"));
            assert!(value.contains("place"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_species_keyword() {
    let source = "species Human { include CommonTraits }";
    let hover = get_hover_info(source, 0, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("species"));
            assert!(value.contains("templates"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_enum_keyword() {
    let source = "enum Emotion { Happy, Sad, Angry }";
    let hover = get_hover_info(source, 0, 2);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("enum"));
            assert!(value.contains("enumeration"));
        },
        | _ => panic!("Expected markup content"),
    }
}

// --- Import/composition keywords ---

#[test]
fn test_hover_on_use_keyword() {
    let source = "use characters::Alice;";
    let hover = get_hover_info(source, 0, 1);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("use"));
            assert!(value.contains("Imports"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_from_keyword() {
    let source = "character Alice from Person { }";
    let hover = get_hover_info(source, 0, 17);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("from"));
            assert!(value.contains("templates"));
        },
        | _ => panic!("Expected markup content"),
    }
}

#[test]
fn test_hover_on_state_keyword() {
    let source = "life_arc Growth { state child { } }";
    let hover = get_hover_info(source, 0, 19);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("state"));
            assert!(value.contains("life arc"));
        },
        | _ => panic!("Expected markup content"),
    }
}

// --- Negative cases: positions that must NOT produce a hover ---

#[test]
fn test_hover_on_whitespace_returns_none() {
    let source = "character Alice { age: 7 }";
    // Hover over whitespace between "character" and "Alice"
    let hover = get_hover_info(source, 0, 10);
    assert!(hover.is_none(), "Hovering on whitespace should return None");
}

#[test]
fn test_hover_on_unknown_word_returns_none() {
    let source = "character Alice { age: 7 }";
    // Hover over "Alice" (not a keyword)
    let hover = get_hover_info(source, 0, 12);
    assert!(
        hover.is_none(),
        "Hovering on non-keyword should return None"
    );
}

#[test]
fn test_hover_at_eof_returns_none() {
    let source = "character Alice { age: 7 }";
    // Try to hover beyond the line
    let hover = get_hover_info(source, 0, 100);
    assert!(hover.is_none(), "Hovering beyond line should return None");
}

#[test]
fn test_hover_on_invalid_line_returns_none() {
    let source = "character Alice { age: 7 }";
    // Try to hover on a line that doesn't exist
    let hover = get_hover_info(source, 100, 0);
    assert!(
        hover.is_none(),
        "Hovering on invalid line should return None"
    );
}

#[test]
fn test_hover_on_comment_returns_none() {
    let source = "// This is a comment\ncharacter Alice { }";
    // Hover over the comment
    let hover = get_hover_info(source, 0, 5);
    // Comments don't contain keywords, so this should return None
    assert!(hover.is_none(), "Hovering on comment should return None");
}

// --- Structural cases ---

// Line/character positions must map correctly across multiple lines.
#[test]
fn test_hover_multiline_document() {
    let source = r#"
character Alice { age: 7 }
template Person { name: String }
behavior Walk { walk_around }
"#;
    // Hover on "template" keyword on line 2
    let hover = get_hover_info(source, 2, 4);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { value, .. }) => {
            assert!(value.contains("template"));
        },
        | _ => panic!("Expected markup content"),
    }
}

// Hover payloads must be Markdown-flavored markup.
#[test]
fn test_hover_preserves_markdown_formatting() {
    let source = "character Alice { }";
    let hover = get_hover_info(source, 0, 5);
    assert!(hover.is_some());
    let hover = hover.unwrap();
    match hover.contents {
        | HoverContents::Markup(MarkupContent { kind, value }) => {
            assert_eq!(kind, MarkupKind::Markdown);
            // Check for markdown formatting
            assert!(value.contains("**character**") || value.contains("character"));
            assert!(value.contains("`character Name { ... }`") || value.contains("Syntax"));
        },
        | _ => panic!("Expected markup content"),
    }
}

205
src/lsp/inlay_hints.rs Normal file
View File

@@ -0,0 +1,205 @@
//! Inlay hints for implicit information
//!
//! Provides inline annotations showing:
//! - Parameter names in action calls
//! - Inferred types for field values
//! - Template/species field sources
use tower_lsp::lsp_types::{
InlayHint,
InlayHintKind,
InlayHintLabel,
Position,
};
use super::document::Document;
use crate::syntax::ast::{
Declaration,
Field,
Value,
};
/// Get inlay hints for a document range
pub fn get_inlay_hints(doc: &Document, start: Position, end: Position) -> Option<Vec<InlayHint>> {
let ast = doc.ast.as_ref()?;
let mut hints = Vec::new();
let mut positions = doc.positions.clone();
// Convert positions to offsets
let start_line = start.line as usize;
let end_line = end.line as usize;
// Process all declarations
for decl in &ast.declarations {
match decl {
| Declaration::Character(character) => {
// Skip if character is outside requested range
if character.span.start_line > end_line || character.span.end_line < start_line {
continue;
}
// Add type hints for character fields
for field in &character.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Template(template) => {
// Skip if template is outside requested range
if template.span.start_line > end_line || template.span.end_line < start_line {
continue;
}
// Add type hints for template fields
for field in &template.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Species(species) => {
// Skip if species is outside requested range
if species.span.start_line > end_line || species.span.end_line < start_line {
continue;
}
// Add type hints for species fields
for field in &species.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Institution(institution) => {
// Skip if institution is outside requested range
if institution.span.start_line > end_line || institution.span.end_line < start_line
{
continue;
}
// Add type hints for institution fields
for field in &institution.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Location(location) => {
// Skip if location is outside requested range
if location.span.start_line > end_line || location.span.end_line < start_line {
continue;
}
// Add type hints for location fields
for field in &location.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Relationship(relationship) => {
// Skip if relationship is outside requested range
if relationship.span.start_line > end_line ||
relationship.span.end_line < start_line
{
continue;
}
// Add type hints for relationship fields
for field in &relationship.fields {
if field.span.start_line >= start_line && field.span.start_line <= end_line {
add_type_hint(&mut hints, &mut positions, field);
}
}
},
| Declaration::Behavior(behavior) => {
// Skip if behavior is outside requested range
if behavior.span.start_line > end_line || behavior.span.end_line < start_line {
continue;
}
// TODO: Add parameter name hints for action calls in behavior
// trees Would need to traverse BehaviorNode
// tree and match actions to schema
},
| _ => {},
}
}
if hints.is_empty() {
None
} else {
Some(hints)
}
}
/// Push a type inlay hint for `field` when the value's type is not
/// obvious from its literal form.
fn add_type_hint(
    hints: &mut Vec<InlayHint>,
    positions: &mut crate::position::PositionTracker,
    field: &Field,
) {
    // Hint only non-obvious values: identifier references, lists (to
    // show the element type), and overrides (to show what's overridden).
    // Scalar literals, objects, ranges, times, durations, and prose read
    // clearly on their own.
    let should_hint = matches!(
        field.value,
        Value::Identifier(_) | Value::List(_) | Value::Override(_)
    );
    if !should_hint {
        return;
    }
    let type_str = infer_value_type(&field.value);
    // Anchor the hint just after the field value.
    let (line, col) = positions.offset_to_position(field.span.end);
    hints.push(InlayHint {
        position: Position {
            line: line as u32,
            character: col as u32,
        },
        label: InlayHintLabel::String(format!(": {}", type_str)),
        kind: Some(InlayHintKind::TYPE),
        text_edits: None,
        tooltip: None,
        padding_left: Some(true),
        padding_right: None,
        data: None,
    });
}
/// Infer the type of a value for display.
///
/// Produces the short, human-readable label used by inlay hints: the
/// dotted path for identifier references, `[Elem]` for lists (element
/// type taken from the first item), `lo..hi` for ranges, and a fixed
/// name for every other variant.
fn infer_value_type(value: &Value) -> String {
    match value {
        | Value::Identifier(path) => path.join("."),
        | Value::String(_) => "String".into(),
        | Value::Int(_) => "Int".into(),
        | Value::Float(_) => "Float".into(),
        | Value::Bool(_) => "Bool".into(),
        | Value::List(items) => items.first().map_or_else(
            || "[]".to_string(),
            |head| format!("[{}]", infer_value_type(head)),
        ),
        | Value::Object(_) => "Object".into(),
        | Value::Range(lo, hi) => {
            format!("{}..{}", infer_value_type(lo), infer_value_type(hi))
        },
        | Value::Time(_) => "Time".into(),
        | Value::Duration(_) => "Duration".into(),
        | Value::ProseBlock(_) => "Prose".into(),
        | Value::Override(_) => "Override".into(),
    }
}

59
src/lsp/mod.rs Normal file
View File

@@ -0,0 +1,59 @@
//! LSP (Language Server Protocol) implementation for Storybook DSL
//!
//! This module provides language server features including:
//! - Real-time diagnostics (validation errors/warnings)
//! - Hover information (documentation, type info)
//! - Document symbols (outline view)
//! - Go-to-definition
//! - Find references
//! - Autocomplete
//! - Document formatting

// Feature providers — one module per LSP capability.
pub mod code_actions;
pub mod completion;
pub mod definition;
pub mod diagnostics;
pub mod document;
pub mod formatting;
pub mod hover;
pub mod inlay_hints;
pub mod references;
pub mod rename;
pub mod semantic_tokens;
pub mod server;
pub mod symbols;

// Test-only modules, compiled only under `cargo test`.
#[cfg(test)]
mod tests;
#[cfg(test)]
mod parser_test;
#[cfg(test)]
mod behavior_tests;
#[cfg(test)]
mod diagnostics_tests;
#[cfg(test)]
mod document_edge_tests;
#[cfg(test)]
mod navigation_tests;
#[cfg(test)]
mod validation_tests;
#[cfg(test)]
mod completion_tests;
#[cfg(test)]
mod code_actions_tests;
#[cfg(test)]
mod hover_tests;
#[cfg(test)]
mod formatting_tests;

// Re-export the server entry point at the module root for convenience.
pub use server::StorybookLanguageServer;

389
src/lsp/navigation_tests.rs Normal file
View File

@@ -0,0 +1,389 @@
//! Tests for navigation features (go-to-definition and find-references)
#[cfg(test)]
mod tests {
    use tower_lsp::lsp_types::{
        GotoDefinitionParams,
        Position,
        ReferenceContext,
        ReferenceParams,
        TextDocumentIdentifier,
        TextDocumentPositionParams,
        Url,
    };

    use crate::lsp::{
        definition,
        document::Document,
        references,
    };

    /// Shared dummy URI for single-document tests.
    fn make_uri() -> Url {
        Url::parse("file:///test.sb").unwrap()
    }

    /// Go-to-definition on a character name resolves to its declaration.
    #[test]
    fn test_goto_definition_character() {
        let source = "character Alice { age: 7 }";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Alice" (at character 10)
        let params = GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 10,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        };
        let result = definition::get_definition(&doc, &params, &uri);
        assert!(result.is_some(), "Should find definition for Alice");
        // Verify it points to the character declaration
        if let Some(response) = result {
            match response {
                | tower_lsp::lsp_types::GotoDefinitionResponse::Scalar(location) => {
                    assert_eq!(location.uri, uri);
                    // Should span the whole character declaration
                    assert_eq!(location.range.start.line, 0);
                },
                | _ => panic!("Expected scalar location"),
            }
        }
    }

    /// Go-to-definition on whitespace returns nothing.
    #[test]
    fn test_goto_definition_not_found() {
        let source = "character Alice { age: 7 }";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on whitespace
        let params = GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 0,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        };
        let result = definition::get_definition(&doc, &params, &uri);
        assert!(result.is_none(), "Should not find definition on whitespace");
    }

    /// Go-to-definition works on a template name at its declaration site.
    #[test]
    fn test_goto_definition_template() {
        let source = r#"
template Child {
age: number
}
character Alice: Child {}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Child" in template declaration (line 1)
        let params = GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 9,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        };
        let result = definition::get_definition(&doc, &params, &uri);
        assert!(
            result.is_some(),
            "Should find definition for Child template"
        );
    }

    /// Find-references picks up both the declaration and a field usage.
    #[test]
    fn test_find_references_character() {
        let source = r#"
character Alice { age: 7 }
character Bob { friend: Alice }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Alice" in first line
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 10,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        assert!(result.is_some(), "Should find references to Alice");
        if let Some(locations) = result {
            // Should find at least 2 references: the declaration and the use in Bob's
            // friend field
            assert!(
                locations.len() >= 2,
                "Should find multiple references to Alice, found {}",
                locations.len()
            );
        }
    }

    /// A symbol used only at its declaration yields exactly one reference.
    #[test]
    fn test_find_references_single_occurrence() {
        let source = "character Bob { age: 5 }";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Bob"
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 10,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        assert!(result.is_some(), "Should find reference to Bob");
        if let Some(locations) = result {
            assert_eq!(locations.len(), 1, "Should find exactly one reference");
        }
    }

    /// Requesting references on punctuation must not panic; the exact result
    /// is unspecified (word lookup may snap to a nearby identifier).
    #[test]
    fn test_find_references_not_found() {
        let source = "character Alice { age: 7 }";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on punctuation character (the opening brace)
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 16,
                }, // Position on '{'
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        // It's okay if this finds something or nothing - the important thing is it
        // doesn't crash If word_at_offset returns None, this will be None
        // If it returns the nearby word, that's also acceptable behavior
        let _ = result; // Just verify it doesn't panic
    }

    /// Matching must be whole-word: "Alice" must not match "Alison"/"Ali".
    #[test]
    fn test_find_references_word_boundaries() {
        let source = r#"
character Alice { age: 7 }
character Alison { age: 8 }
character Ali { age: 6 }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Alice"
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 10,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        assert!(result.is_some(), "Should find references to Alice");
        if let Some(locations) = result {
            // Should only find "Alice", not "Alison" or "Ali"
            assert_eq!(locations.len(), 1, "Should only find exact match for Alice");
        }
    }

    /// A template referenced by multiple characters reports every use.
    #[test]
    fn test_find_references_multiple_files_same_name() {
        let source = r#"
template Child { age: number }
character Alice: Child { age: 7 }
character Bob: Child { age: 5 }
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Child" in template declaration
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 9,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        assert!(result.is_some(), "Should find references to Child template");
        if let Some(locations) = result {
            // Should find declaration + 2 uses (Alice: Child, Bob: Child)
            assert!(
                locations.len() >= 3,
                "Should find template declaration and uses, found {}",
                locations.len()
            );
        }
    }

    /// Go-to-definition also resolves behavior names.
    #[test]
    fn test_goto_definition_behavior() {
        let source = r#"
behavior WalkAround {
patrol
}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "WalkAround"
        let params = GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 9,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        };
        let result = definition::get_definition(&doc, &params, &uri);
        assert!(
            result.is_some(),
            "Should find definition for WalkAround behavior"
        );
    }

    /// Species declarations and their character uses are all reported.
    #[test]
    fn test_find_references_species() {
        let source = r#"
species Human {}
character Alice: Human {}
character Bob: Human {}
"#;
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Human" in species declaration
        let params = ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 8,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        };
        let result = references::find_references(&doc, &params, &uri);
        assert!(result.is_some(), "Should find references to Human species");
        if let Some(locations) = result {
            // Should find declaration + 2 uses
            assert!(
                locations.len() >= 3,
                "Should find species declaration and character uses, found {}",
                locations.len()
            );
        }
    }

    /// A reference on a later line resolves back to the line-0 declaration.
    #[test]
    fn test_goto_definition_multiline() {
        // Don't start with newline to make line numbers clearer
        let source = "character Alice {\n    age: 7\n}\n\ncharacter Bob {\n    friend: Alice\n}";
        let doc = Document::new(source.to_string());
        let uri = make_uri();
        // Position on "Alice" in Bob's friend field (line 5)
        let params = GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 5,
                    character: 12,
                },
            },
            work_done_progress_params: Default::default(),
            partial_result_params: Default::default(),
        };
        let result = definition::get_definition(&doc, &params, &uri);
        assert!(result.is_some(), "Should find definition for Alice");
        if let Some(response) = result {
            match response {
                | tower_lsp::lsp_types::GotoDefinitionResponse::Scalar(location) => {
                    // Should point to Alice's declaration on line 0
                    assert_eq!(location.range.start.line, 0);
                },
                | _ => panic!("Expected scalar location"),
            }
        }
    }
}

66
src/lsp/parser_test.rs Normal file
View File

@@ -0,0 +1,66 @@
//! Quick parser test to debug parsing issues
#[cfg(test)]
mod tests {
    use crate::lsp::document::Document;

    /// Smallest happy path: a single one-line character declaration.
    #[test]
    fn test_simple_character() {
        let input = "character Alice { age: 7 }";
        let doc = Document::new(input.to_string());
        println!("Parse errors: {:?}", doc.parse_errors);
        if doc.ast.is_some() {
            println!("SUCCESS: Parsed AST");
        } else {
            println!("FAILED: No AST produced");
        }
        assert!(doc.ast.is_some(), "Should parse simple character");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    /// A species declaration followed by a character that uses it.
    #[test]
    fn test_species_first() {
        let input = r#"
species Human {
intelligence: high
}
character Alice: Human {
age: 7
}
"#;
        let doc = Document::new(input.to_string());
        if !doc.parse_errors.is_empty() {
            for err in &doc.parse_errors {
                eprintln!("Parse error: {}", err.message);
            }
        }
        assert!(doc.ast.is_some(), "Should parse species and character");
        assert!(doc.parse_errors.is_empty(), "Should have no parse errors");
    }

    /// Character containing a `---name … ---` prose block.
    #[test]
    fn test_with_prose() {
        let input = r#"
character Alice {
age: 7
---backstory
A curious girl
---
}
"#;
        let doc = Document::new(input.to_string());
        if !doc.parse_errors.is_empty() {
            for err in &doc.parse_errors {
                eprintln!("Parse error: {}", err.message);
            }
        }
        assert!(doc.ast.is_some(), "Should parse character with prose");
    }
}

78
src/lsp/references.rs Normal file
View File

@@ -0,0 +1,78 @@
//! Find references provider
//!
//! Finds all references to a symbol across the document
use tower_lsp::lsp_types::{
Location,
Range,
ReferenceParams,
Url,
};
use super::document::Document;
/// Find all references to the symbol under the cursor.
///
/// Resolves the cursor to a word, then scans the document text for every
/// whole-word occurrence of that word and reports each as a `Location`.
/// Returns `None` when the cursor is not on a word or no occurrence passes
/// the word-boundary check.
pub fn find_references(
    doc: &Document,
    params: &ReferenceParams,
    uri: &Url,
) -> Option<Vec<Location>> {
    let pos = params.text_document_position.position;
    // Resolve cursor -> byte offset -> word under cursor.
    let cursor = position_to_offset(doc, pos.line as usize, pos.character as usize)?;
    let symbol = doc.word_at_offset(cursor)?;
    let mut tracker = doc.positions.clone();
    // Textual scan: every match of `symbol` that sits on word boundaries.
    let found: Vec<Location> = doc
        .text
        .match_indices(&symbol)
        .filter(|&(start, _)| is_word_boundary(&doc.text, start, symbol.len()))
        .map(|(start, _)| {
            let (start_line, start_col) = tracker.offset_to_position(start);
            let (end_line, end_col) = tracker.offset_to_position(start + symbol.len());
            Location {
                uri: uri.clone(),
                range: Range {
                    start: tower_lsp::lsp_types::Position {
                        line: start_line as u32,
                        character: start_col as u32,
                    },
                    end: tower_lsp::lsp_types::Position {
                        line: end_line as u32,
                        character: end_col as u32,
                    },
                },
            }
        })
        .collect();
    if found.is_empty() {
        None
    } else {
        Some(found)
    }
}
/// Convert LSP position to byte offset
///
/// NOTE(review): `character` is added to the line start as a raw byte
/// count. LSP positions default to UTF-16 code units, so this is exact
/// only for ASCII lines — confirm against the negotiated position
/// encoding. The result is also not clamped to the line's length.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    let line_start = doc.positions.line_offset(line)?;
    Some(line_start + character)
}
/// Check if a match is at a word boundary
///
/// `offset` and `len` are byte positions of the match within `text` (as
/// produced by `str::match_indices`). The match counts as a whole word
/// when neither the character immediately before nor immediately after
/// it is a word character (alphanumeric or `_`).
fn is_word_boundary(text: &str, offset: usize, len: usize) -> bool {
    let before_ok = offset == 0 || !is_word_char_at(text, offset - 1);
    let after_ok = offset + len >= text.len() || !is_word_char_at(text, offset + len);
    before_ok && after_ok
}
/// Report whether the character starting at byte `offset` is a word
/// character (alphanumeric or `_`).
///
/// `offset` is a *byte* offset — callers pass offsets derived from
/// `str::match_indices`. The previous implementation used
/// `chars().nth(offset)`, which counts *characters*, not bytes: wrong for
/// any text containing multi-byte characters, and O(n) per call. Slicing
/// at the byte offset is both correct and O(1). An offset that is not a
/// char boundary (mid-way through a multi-byte character) yields `None`
/// and is conservatively treated as a non-word character.
fn is_word_char_at(text: &str, offset: usize) -> bool {
    text.get(offset..)
        .and_then(|rest| rest.chars().next())
        .is_some_and(|c| c.is_alphanumeric() || c == '_')
}

399
src/lsp/rename.rs Normal file
View File

@@ -0,0 +1,399 @@
//! Rename refactoring provider
//!
//! Provides semantic, workspace-wide symbol renaming using NameTable
use std::collections::HashMap;
use tower_lsp::lsp_types::{
Position,
Range,
RenameParams,
TextEdit,
Url,
WorkspaceEdit,
};
use super::document::Document;
use crate::{
position::PositionTracker,
resolve::find_all_references,
};
/// Perform a workspace-wide semantic rename operation
pub fn get_rename_edits(
documents: &HashMap<Url, Document>,
params: &RenameParams,
uri: &Url,
) -> Option<WorkspaceEdit> {
let doc = documents.get(uri)?;
let position = params.text_document_position.position;
// Convert LSP position to byte offset
let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
// Get the word at the cursor
let old_name = doc.word_at_offset(offset)?;
// Look up the symbol in the name table - this validates it's a real symbol
let entry = doc.name_table.resolve_name(&old_name)?;
// Get the symbol's kind for semantic matching
let symbol_kind = entry.kind;
// Collect all ASTs and build file_index -> URL mapping
let mut file_asts = Vec::new();
let mut file_url_map: HashMap<usize, Url> = HashMap::new();
let mut url_file_map: HashMap<Url, usize> = HashMap::new();
let mut url_positions_map: HashMap<Url, PositionTracker> = HashMap::new();
for (doc_uri, document) in documents {
if let Some(ref ast) = document.ast {
let file_index = file_asts.len();
file_asts.push(ast.clone());
file_url_map.insert(file_index, doc_uri.clone());
url_file_map.insert(doc_uri.clone(), file_index);
url_positions_map.insert(doc_uri.clone(), document.positions.clone());
}
}
// Find all semantic references using the language layer
let references = find_all_references(&file_asts, &old_name, symbol_kind);
// Convert references to TextEdits grouped by file
let mut all_changes: HashMap<Url, Vec<TextEdit>> = HashMap::new();
for reference in references {
// Get the URL for this file
if let Some(url) = file_url_map.get(&reference.file_index) {
if let Some(mut positions) = url_positions_map.get(url).cloned() {
// Convert byte offsets to line/col positions
let (start_line, start_col) = positions.offset_to_position(reference.span.start);
let (end_line, end_col) = positions.offset_to_position(reference.span.end);
let edit = TextEdit {
range: Range {
start: Position {
line: start_line as u32,
character: start_col as u32,
},
end: Position {
line: end_line as u32,
character: end_col as u32,
},
},
new_text: params.new_name.clone(),
};
all_changes
.entry(url.clone())
.or_default()
.push(edit);
}
}
}
if all_changes.is_empty() {
return None;
}
Some(WorkspaceEdit {
changes: Some(all_changes),
document_changes: None,
change_annotations: None,
})
}
/// Check whether a rename is valid at `position`, returning the range of
/// the symbol's definition when it is.
///
/// A position is renameable only when the word under the cursor resolves
/// to a known symbol in the document's name table.
pub fn prepare_rename(doc: &Document, position: Position) -> Option<Range> {
    let offset = position_to_offset(doc, position.line as usize, position.character as usize)?;
    let symbol = doc.word_at_offset(offset)?;
    // Only symbols known to the name table can be renamed.
    let entry = doc.name_table.resolve_name(&symbol)?;
    // Report the span of the symbol at its definition site.
    let mut tracker = doc.positions.clone();
    let (def_start_line, def_start_col) = tracker.offset_to_position(entry.span.start);
    let (def_end_line, def_end_col) = tracker.offset_to_position(entry.span.end);
    Some(Range {
        start: Position {
            line: def_start_line as u32,
            character: def_start_col as u32,
        },
        end: Position {
            line: def_end_line as u32,
            character: def_end_col as u32,
        },
    })
}
/// Convert LSP position to byte offset
///
/// NOTE(review): `character` is treated as a raw byte count, while LSP
/// positions default to UTF-16 code units — exact only for ASCII lines;
/// confirm against the negotiated position encoding. Not clamped to the
/// line length.
fn position_to_offset(doc: &Document, line: usize, character: usize) -> Option<usize> {
    let line_start = doc.positions.line_offset(line)?;
    Some(line_start + character)
}
#[cfg(test)]
mod tests {
    use super::*;

    use crate::lsp::document::Document;

    /// Build an in-memory workspace from `(uri, source)` pairs.
    fn make_documents(files: Vec<(&str, &str)>) -> HashMap<Url, Document> {
        files
            .into_iter()
            .map(|(uri_str, content)| {
                let uri = Url::parse(uri_str).unwrap();
                let doc = Document::new(content.to_string());
                (uri, doc)
            })
            .collect()
    }

    /// Renaming a character updates its declaration and its field reference.
    #[test]
    fn test_rename_character_single_file() {
        let source = r#"
character Alice {}
character Bob { friend: Alice }
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 11,
                },
            },
            new_name: "Alicia".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_some());
        let workspace_edit = result.unwrap();
        let changes = workspace_edit.changes.unwrap();
        let edits = changes.get(&uri).unwrap();
        // Should find 2 occurrences: definition and reference
        assert_eq!(edits.len(), 2);
        assert!(edits.iter().all(|e| e.new_text == "Alicia"));
    }

    /// A rename initiated in one file produces edits in every file that
    /// references the symbol.
    #[test]
    fn test_rename_across_multiple_files() {
        let file1 = "character Alice {}";
        let file2 = "character Bob { friend: Alice }";
        let file3 = "character Charlie { mentor: Alice }";
        let documents = make_documents(vec![
            ("file:///file1.sb", file1),
            ("file:///file2.sb", file2),
            ("file:///file3.sb", file3),
        ]);
        let uri = Url::parse("file:///file1.sb").unwrap();
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 11,
                },
            },
            new_name: "Alicia".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_some());
        let changes = result.unwrap().changes.unwrap();
        // Should have edits in all 3 files
        assert_eq!(changes.len(), 3);
        // File 1: definition
        assert_eq!(
            changes
                .get(&Url::parse("file:///file1.sb").unwrap())
                .unwrap()
                .len(),
            1
        );
        // File 2: one reference
        assert_eq!(
            changes
                .get(&Url::parse("file:///file2.sb").unwrap())
                .unwrap()
                .len(),
            1
        );
        // File 3: one reference
        assert_eq!(
            changes
                .get(&Url::parse("file:///file3.sb").unwrap())
                .unwrap()
                .len(),
            1
        );
    }

    /// Template names are renameable, updating declaration and `from` use.
    #[test]
    fn test_rename_template() {
        let source = r#"
template Person {}
character Alice from Person {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 10,
                },
            },
            new_name: "Human".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        assert_eq!(edits.len(), 2); // Definition + usage
    }

    /// Renaming from a keyword position yields no edits.
    #[test]
    fn test_rename_not_found() {
        let source = "character Alice {}";
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 0,
                    character: 0,
                }, // On "character" keyword
            },
            new_name: "NewName".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_none());
    }

    /// prepare_rename returns the definition range for a real symbol.
    #[test]
    fn test_prepare_rename_valid() {
        let source = "character Alice {}";
        let doc = Document::new(source.to_string());
        let result = prepare_rename(
            &doc,
            Position {
                line: 0,
                character: 11,
            },
        );
        assert!(result.is_some());
        let range = result.unwrap();
        assert_eq!(range.start.line, 0);
        assert_eq!(range.end.line, 0);
    }

    /// prepare_rename rejects positions that are not on a symbol.
    #[test]
    fn test_prepare_rename_invalid() {
        let source = "character Alice {}";
        let doc = Document::new(source.to_string());
        // Position on keyword "character"
        let result = prepare_rename(
            &doc,
            Position {
                line: 0,
                character: 0,
            },
        );
        assert!(result.is_none());
    }

    #[test]
    fn test_rename_respects_symbol_kind() {
        // Test that we don't rename unrelated symbols with the same name in different
        // contexts
        let source = r#"
character Alice {}
template Person {}
character Bob { friend: Alice }
character Charlie from Person {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        // Try to rename the character Alice
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 11,
                },
            },
            new_name: "Alicia".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        // Should rename the character definition and its field reference, but not the
        // template
        assert_eq!(edits.len(), 2);
    }

    /// Rename is whole-word: must not touch "AliceJr" or "NotAlice".
    #[test]
    fn test_rename_with_word_boundaries() {
        let source = r#"
character Alice {}
character AliceJr {}
character NotAlice {}
"#;
        let uri = Url::parse("file:///test.sb").unwrap();
        let documents = make_documents(vec![("file:///test.sb", source)]);
        let params = RenameParams {
            text_document_position: tower_lsp::lsp_types::TextDocumentPositionParams {
                text_document: tower_lsp::lsp_types::TextDocumentIdentifier { uri: uri.clone() },
                position: Position {
                    line: 1,
                    character: 11,
                },
            },
            new_name: "Alicia".to_string(),
            work_done_progress_params: Default::default(),
        };
        let result = get_rename_edits(&documents, &params, &uri);
        assert!(result.is_some());
        let edits = result.unwrap().changes.unwrap().get(&uri).unwrap().clone();
        // Should only rename exact "Alice", not "AliceJr" or "NotAlice"
        assert_eq!(edits.len(), 1);
    }
}

536
src/lsp/semantic_tokens.rs Normal file
View File

@@ -0,0 +1,536 @@
//! Semantic tokens for enhanced syntax highlighting
//!
//! Provides detailed token type information beyond what a basic grammar can
//! provide, allowing the editor to highlight different kinds of identifiers,
//! keywords, and values with appropriate semantic meaning.
use tower_lsp::lsp_types::{
SemanticToken,
SemanticTokenType,
SemanticTokens,
SemanticTokensResult,
};
use super::document::Document;
use crate::syntax::{
ast::{
Declaration,
Field,
Value,
},
lexer::{
Lexer,
Token,
},
};
/// Standard semantic token types supported by LSP
///
/// NOTE(review): the index of each entry is the numeric token-type id sent
/// to the client, so reordering this slice changes highlighting —
/// presumably `token_type_index` maps into it; confirm before reordering.
pub const LEGEND_TYPES: &[SemanticTokenType] = &[
    SemanticTokenType::NAMESPACE,   // use paths
    SemanticTokenType::TYPE,        // template names, species names, enum names
    SemanticTokenType::CLASS,       // character declarations
    SemanticTokenType::ENUM,        // enum declarations
    SemanticTokenType::INTERFACE,   // template declarations
    SemanticTokenType::STRUCT,      // institution, location declarations
    SemanticTokenType::PARAMETER,   // action parameters
    SemanticTokenType::VARIABLE,    // character names in schedules
    SemanticTokenType::PROPERTY,    // field names
    SemanticTokenType::ENUM_MEMBER, // enum variant names
    SemanticTokenType::FUNCTION,    // behavior names
    SemanticTokenType::METHOD,      // relationship names
    SemanticTokenType::KEYWORD,     // keywords like "from", "include", "strict"
    SemanticTokenType::STRING,      // string literals
    SemanticTokenType::NUMBER,      // numeric literals
    SemanticTokenType::OPERATOR,    // operators like "..", "->", etc.
];

/// Semantic token modifiers (currently unused but available for future use)
///
/// As with the type legend, position is the bit index of each modifier.
pub const LEGEND_MODIFIERS: &[&str] = &[
    "declaration",
    "definition",
    "readonly",
    "static",
    "deprecated",
    "abstract",
    "async",
    "modification",
    "documentation",
    "defaultLibrary",
];
/// Helper to find identifier positions within a span using the lexer.
///
/// Re-lexes `text[span_start..span_end]` and returns the absolute byte
/// offset of every identifier token whose name appears in `target_names`.
/// The lexer is streamed directly instead of collecting every token into
/// an intermediate `Vec` first (needless_collect).
fn find_identifiers_in_span(
    text: &str,
    span_start: usize,
    span_end: usize,
    target_names: &[String],
) -> Vec<(usize, String)> {
    let span_text = &text[span_start..span_end];
    Lexer::new(span_text)
        .filter_map(|(offset, token, _end)| match token {
            // Lexer offsets are relative to the span; rebase to the file.
            | Token::Ident(name) if target_names.contains(&name) => {
                Some((span_start + offset, name))
            },
            | _ => None,
        })
        .collect()
}
/// Recursively highlight behavior tree nodes
///
/// Walks the tree depth-first: composite nodes (selector, sequence,
/// decorator) recurse into their children, action leaves highlight their
/// field parameters, and subtree/condition leaves are currently left
/// unhighlighted because no span data is available for them.
fn highlight_behavior_node(
    builder: &mut SemanticTokensBuilder,
    doc: &Document,
    node: &crate::syntax::ast::BehaviorNode,
) {
    use crate::syntax::ast::BehaviorNode;
    match node {
        | BehaviorNode::Selector { children, .. } | BehaviorNode::Sequence { children, .. } => {
            // Composite nodes: recurse into every child in order.
            for child in children {
                highlight_behavior_node(builder, doc, child);
            }
        },
        | BehaviorNode::Action(action_name, params) => {
            // Action names don't have spans, so we'd need to search for them
            // For now, just highlight the parameters
            for param in params {
                highlight_field(builder, param);
            }
            let _ = action_name; // Suppress warning
        },
        | BehaviorNode::Decorator { child, .. } => {
            highlight_behavior_node(builder, doc, child);
        },
        | BehaviorNode::SubTree(_path) => {
            // SubTree references another behavior by path
            // Would need position tracking to highlight
        },
        | BehaviorNode::Condition(_expr) => {
            // Conditions contain expressions which could be highlighted
            // Would need expression traversal
        },
    }
}
/// Generate semantic tokens for a document
pub fn get_semantic_tokens(doc: &Document) -> Option<SemanticTokensResult> {
let ast = doc.ast.as_ref()?;
let mut builder = SemanticTokensBuilder::new(&doc.text);
let mut positions = doc.positions.clone();
// Process all top-level declarations
for decl in &ast.declarations {
match decl {
| Declaration::Use(use_decl) => {
// Highlight use paths as namespaces
let path_positions = find_identifiers_in_span(
&doc.text,
use_decl.span.start,
use_decl.span.end,
&use_decl.path,
);
for (offset, segment) in path_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
segment.len(),
token_type_index(SemanticTokenType::NAMESPACE),
0,
);
}
},
| Declaration::Character(character) => {
// Highlight character name as CLASS
builder.add_token(
character.span.start_line,
character.span.start_col,
character.name.len(),
token_type_index(SemanticTokenType::CLASS),
0,
);
// Highlight species as TYPE
if let Some(ref species) = character.species {
let species_positions = find_identifiers_in_span(
&doc.text,
character.span.start,
character.span.end,
&[species.clone()],
);
for (offset, species_name) in species_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
species_name.len(),
token_type_index(SemanticTokenType::TYPE),
0,
);
}
}
// Highlight template references
if let Some(ref templates) = character.template {
let template_positions = find_identifiers_in_span(
&doc.text,
character.span.start,
character.span.end,
templates,
);
for (offset, template_name) in template_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
template_name.len(),
token_type_index(SemanticTokenType::INTERFACE),
0,
);
}
}
// Highlight fields
for field in &character.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::Template(template) => {
// Highlight template name as INTERFACE
builder.add_token(
template.span.start_line,
template.span.start_col,
template.name.len(),
token_type_index(SemanticTokenType::INTERFACE),
0,
);
// Find and highlight includes using the lexer
let include_positions = find_identifiers_in_span(
&doc.text,
template.span.start,
template.span.end,
&template.includes,
);
for (offset, include_name) in include_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
include_name.len(),
token_type_index(SemanticTokenType::INTERFACE),
0,
);
}
// Highlight fields
for field in &template.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::Species(species) => {
// Highlight species name as TYPE
builder.add_token(
species.span.start_line,
species.span.start_col,
species.name.len(),
token_type_index(SemanticTokenType::TYPE),
0,
);
// Highlight fields
for field in &species.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::Enum(enum_decl) => {
// Highlight enum name as ENUM
builder.add_token(
enum_decl.span.start_line,
enum_decl.span.start_col,
enum_decl.name.len(),
token_type_index(SemanticTokenType::ENUM),
0,
);
// Find and highlight enum variants using the lexer
let variant_positions = find_identifiers_in_span(
&doc.text,
enum_decl.span.start,
enum_decl.span.end,
&enum_decl.variants,
);
for (offset, variant_name) in variant_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
variant_name.len(),
token_type_index(SemanticTokenType::ENUM_MEMBER),
0,
);
}
},
| Declaration::Institution(institution) => {
// Highlight institution name as STRUCT
builder.add_token(
institution.span.start_line,
institution.span.start_col,
institution.name.len(),
token_type_index(SemanticTokenType::STRUCT),
0,
);
// Highlight fields
for field in &institution.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::Location(location) => {
// Highlight location name as STRUCT
builder.add_token(
location.span.start_line,
location.span.start_col,
location.name.len(),
token_type_index(SemanticTokenType::STRUCT),
0,
);
// Highlight fields
for field in &location.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::Behavior(behavior) => {
// Highlight behavior name as FUNCTION
builder.add_token(
behavior.span.start_line,
behavior.span.start_col,
behavior.name.len(),
token_type_index(SemanticTokenType::FUNCTION),
0,
);
// TODO: Traverse behavior tree to highlight conditions and actions
// Would need recursive function to walk BehaviorNode tree
highlight_behavior_node(&mut builder, doc, &behavior.root);
},
| Declaration::Relationship(relationship) => {
// Highlight relationship name as METHOD
builder.add_token(
relationship.span.start_line,
relationship.span.start_col,
relationship.name.len(),
token_type_index(SemanticTokenType::METHOD),
0,
);
// Highlight participants
for participant in &relationship.participants {
// For qualified paths like "Alice.parent", we want to highlight each segment
// The participant has its own span, so we can search within it
let participant_names = participant.name.clone();
let name_positions = find_identifiers_in_span(
&doc.text,
participant.span.start,
participant.span.end,
&participant_names,
);
for (offset, name) in name_positions {
let (line, col) = positions.offset_to_position(offset);
builder.add_token(
line,
col,
name.len(),
token_type_index(SemanticTokenType::VARIABLE),
0,
);
}
}
// Highlight fields
for field in &relationship.fields {
highlight_field(&mut builder, field);
}
},
| Declaration::LifeArc(life_arc) => {
// Highlight life_arc name as TYPE
builder.add_token(
life_arc.span.start_line,
life_arc.span.start_col,
life_arc.name.len(),
token_type_index(SemanticTokenType::TYPE),
0,
);
// Highlight states and transitions
for state in &life_arc.states {
// State name as ENUM_MEMBER
builder.add_token(
state.span.start_line,
state.span.start_col,
state.name.len(),
token_type_index(SemanticTokenType::ENUM_MEMBER),
0,
);
// State fields
if let Some(ref fields) = state.on_enter {
for field in fields {
highlight_field(&mut builder, field);
}
}
}
},
| Declaration::Schedule(schedule) => {
// Highlight schedule name as TYPE
builder.add_token(
schedule.span.start_line,
schedule.span.start_col,
schedule.name.len(),
token_type_index(SemanticTokenType::TYPE),
0,
);
// Highlight block fields
for block in &schedule.blocks {
for field in &block.fields {
highlight_field(&mut builder, field);
}
}
},
}
}
Some(SemanticTokensResult::Tokens(SemanticTokens {
result_id: None,
data: builder.build(),
}))
}
/// Emit semantic tokens for a single field: the field name is tagged as
/// PROPERTY, then the field's value is highlighted recursively.
fn highlight_field(builder: &mut SemanticTokensBuilder, field: &Field) {
    let span = &field.span;
    // Field name -> PROPERTY token at the field's start position.
    builder.add_token(
        span.start_line,
        span.start_col,
        field.name.len(),
        token_type_index(SemanticTokenType::PROPERTY),
        0,
    );
    // Recurse into the value (lists/objects may contain nested fields).
    highlight_value(builder, &field.value);
}
/// Emit semantic tokens for a field value, recursing into composite values.
///
/// Literal forms (strings, numbers, booleans, prose blocks, times/durations)
/// are already handled by the grammar-based highlighter, so their arms emit
/// nothing here.
fn highlight_value(builder: &mut SemanticTokensBuilder, value: &Value) {
    match value {
        // Containers: walk every element / nested field recursively.
        | Value::List(items) => {
            items
                .iter()
                .for_each(|item| highlight_value(builder, item));
        },
        | Value::Object(fields) => {
            fields
                .iter()
                .for_each(|field| highlight_field(builder, field));
        },
        | Value::Range(lo, hi) => {
            highlight_value(builder, lo);
            highlight_value(builder, hi);
        },
        // Identifiers could be highlighted as TYPE references, but that
        // requires precise position tracking; nothing is emitted for now.
        | Value::Identifier(_) => {},
        // Override values need their own handling (not implemented yet).
        | Value::Override(_) => {},
        // Plain literals: the grammar highlighter already covers these.
        | Value::String(_)
        | Value::Int(_)
        | Value::Float(_)
        | Value::Bool(_)
        | Value::ProseBlock(_)
        | Value::Time(_)
        | Value::Duration(_) => {},
    }
}
/// Map a semantic token type to its index within `LEGEND_TYPES`.
///
/// Unknown types fall back to index 0 so the client never receives an
/// out-of-range legend index.
fn token_type_index(token_type: SemanticTokenType) -> u32 {
    let index = LEGEND_TYPES.iter().position(|t| *t == token_type);
    index.unwrap_or(0) as u32
}
/// Builder for semantic tokens with proper delta encoding
///
/// Tokens are collected in absolute coordinates and only sorted and
/// delta-encoded when `build` is called.
struct SemanticTokensBuilder {
    tokens: Vec<(usize, usize, usize, u32, u32)>, // (line, col, length, type, modifiers)
}
impl SemanticTokensBuilder {
fn new(_text: &str) -> Self {
Self { tokens: Vec::new() }
}
fn add_token(
&mut self,
line: usize,
col: usize,
length: usize,
token_type: u32,
modifiers: u32,
) {
self.tokens.push((line, col, length, token_type, modifiers));
}
fn build(mut self) -> Vec<SemanticToken> {
// Sort tokens by position (line, then column)
self.tokens
.sort_by_key(|(line, col, _, _, _)| (*line, *col));
// Convert to delta-encoded format required by LSP
let mut result = Vec::new();
let mut prev_line = 0;
let mut prev_col = 0;
for (line, col, length, token_type, modifiers) in self.tokens {
let delta_line = line - prev_line;
let delta_start = if delta_line == 0 { col - prev_col } else { col };
result.push(SemanticToken {
delta_line: delta_line as u32,
delta_start: delta_start as u32,
length: length as u32,
token_type,
token_modifiers_bitset: modifiers,
});
prev_line = line;
prev_col = col;
}
result
}
}

472
src/lsp/server.rs Normal file
View File

@@ -0,0 +1,472 @@
//! Main LSP server implementation with full feature support
use std::{
collections::HashMap,
sync::Arc,
};
use tokio::sync::RwLock;
use tower_lsp::{
jsonrpc::Result,
lsp_types::{
CodeActionParams,
CodeActionResponse,
CompletionOptions,
CompletionParams,
CompletionResponse,
Diagnostic,
DiagnosticSeverity,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams,
DocumentFormattingParams,
DocumentSymbolParams,
DocumentSymbolResponse,
GotoDefinitionParams,
GotoDefinitionResponse,
Hover,
HoverParams,
HoverProviderCapability,
InitializeParams,
InitializeResult,
InitializedParams,
InlayHint,
InlayHintParams,
Location,
MessageType,
OneOf,
Position,
Range,
ReferenceParams,
RenameParams,
SemanticTokensFullOptions,
SemanticTokensLegend,
SemanticTokensOptions,
SemanticTokensParams,
SemanticTokensResult,
SemanticTokensServerCapabilities,
ServerCapabilities,
ServerInfo,
TextDocumentSyncCapability,
TextDocumentSyncKind,
TextEdit,
Url,
WorkDoneProgressOptions,
WorkspaceEdit,
},
Client,
LanguageServer,
};
use super::{
code_actions,
completion,
definition,
document::{
Document,
ErrorSeverity,
},
formatting,
hover,
inlay_hints,
references,
rename,
semantic_tokens,
symbols,
};
use crate::resolve::names::NameTable;
/// Workspace-level state tracking all documents and cross-file references
#[derive(Debug)]
struct WorkspaceState {
    /// Combined name table from all open documents
    name_table: NameTable,
    /// Mapping from file index to URL; indices are assigned in `rebuild`.
    file_urls: HashMap<usize, Url>,
}
impl WorkspaceState {
    /// Create an empty workspace state with no documents registered.
    fn new() -> Self {
        Self {
            name_table: NameTable::new(),
            file_urls: HashMap::new(),
        }
    }
    /// Rebuild the workspace name table from all documents
    ///
    /// NOTE(review): `documents` is a HashMap, so enumeration order — and
    /// therefore the `file_index` assigned to each URL — can change between
    /// rebuilds; nothing should persist these indices across calls.
    fn rebuild(&mut self, documents: &HashMap<Url, Document>) {
        self.name_table = NameTable::new();
        self.file_urls.clear();
        // Build name table from all parsed documents
        for (file_index, (url, doc)) in documents.iter().enumerate() {
            self.file_urls.insert(file_index, url.clone());
            if let Some(ref ast) = doc.ast {
                // Build name table for this file
                // TODO: Properly merge file name tables with file_index
                // For now, we'll use the simple single-file approach
                let _ = (ast, file_index); // Use variables to avoid warnings
            }
        }
    }
}
/// The main language server instance
pub struct StorybookLanguageServer {
    /// Handle for sending notifications/diagnostics back to the editor.
    client: Client,
    /// Per-URI document cache, shared across async request handlers.
    documents: Arc<RwLock<HashMap<Url, Document>>>,
    /// Cross-file state rebuilt whenever a document is opened/changed/closed.
    workspace: Arc<RwLock<WorkspaceState>>,
}
impl StorybookLanguageServer {
pub fn new(client: Client) -> Self {
Self {
client,
documents: Arc::new(RwLock::new(HashMap::new())),
workspace: Arc::new(RwLock::new(WorkspaceState::new())),
}
}
/// Rebuild workspace state after document changes
async fn rebuild_workspace(&self) {
let documents = self.documents.read().await;
let mut workspace = self.workspace.write().await;
workspace.rebuild(&documents);
}
/// Publish diagnostics for a document
async fn publish_diagnostics(&self, uri: &Url, doc: &Document) {
let mut positions = doc.positions.clone();
let mut diagnostics = Vec::new();
// Add parse errors
for error in &doc.parse_errors {
let (start_line, start_col) = positions.offset_to_position(error.start);
let (end_line, end_col) = positions.offset_to_position(error.end);
diagnostics.push(Diagnostic {
range: Range {
start: Position {
line: start_line as u32,
character: start_col as u32,
},
end: Position {
line: end_line as u32,
character: end_col as u32,
},
},
severity: Some(match error.severity {
| ErrorSeverity::Error => DiagnosticSeverity::ERROR,
| ErrorSeverity::Warning => DiagnosticSeverity::WARNING,
}),
code: None,
source: Some("storybook-parser".to_string()),
message: error.message.clone(),
related_information: None,
tags: None,
code_description: None,
data: None,
});
}
// Add semantic validation errors
for error in &doc.resolve_errors {
// For now, show resolve errors at the start of the document
// TODO: Extract proper position information from ResolveError spans
diagnostics.push(Diagnostic {
range: Range {
start: Position {
line: 0,
character: 0,
},
end: Position {
line: 0,
character: 1,
},
},
severity: Some(DiagnosticSeverity::ERROR),
code: None,
source: Some("storybook-validator".to_string()),
message: format!("{}", error),
related_information: None,
tags: None,
code_description: None,
data: None,
});
}
self.client
.publish_diagnostics(uri.clone(), diagnostics, None)
.await;
}
}
#[tower_lsp::async_trait]
impl LanguageServer for StorybookLanguageServer {
    /// Negotiate capabilities with the client. The server advertises
    /// full-document sync, hover, completion (trigger chars '.', ':', '@'),
    /// document symbols, goto-definition, references, formatting, rename
    /// with prepare support, code actions, full-document semantic tokens,
    /// and inlay hints.
    async fn initialize(&self, _params: InitializeParams) -> Result<InitializeResult> {
        Ok(InitializeResult {
            capabilities: ServerCapabilities {
                text_document_sync: Some(TextDocumentSyncCapability::Kind(
                    TextDocumentSyncKind::FULL,
                )),
                hover_provider: Some(HoverProviderCapability::Simple(true)),
                completion_provider: Some(CompletionOptions {
                    trigger_characters: Some(vec![
                        ".".to_string(),
                        ":".to_string(),
                        "@".to_string(),
                    ]),
                    ..Default::default()
                }),
                document_symbol_provider: Some(OneOf::Left(true)),
                definition_provider: Some(OneOf::Left(true)),
                references_provider: Some(OneOf::Left(true)),
                document_formatting_provider: Some(OneOf::Left(true)),
                rename_provider: Some(OneOf::Right(tower_lsp::lsp_types::RenameOptions {
                    prepare_provider: Some(true),
                    work_done_progress_options: Default::default(),
                })),
                code_action_provider: Some(
                    tower_lsp::lsp_types::CodeActionProviderCapability::Simple(true),
                ),
                semantic_tokens_provider: Some(
                    SemanticTokensServerCapabilities::SemanticTokensOptions(
                        SemanticTokensOptions {
                            work_done_progress_options: WorkDoneProgressOptions::default(),
                            legend: SemanticTokensLegend {
                                token_types: semantic_tokens::LEGEND_TYPES.to_vec(),
                                token_modifiers: semantic_tokens::LEGEND_MODIFIERS
                                    .iter()
                                    .map(|s| tower_lsp::lsp_types::SemanticTokenModifier::new(s))
                                    .collect(),
                            },
                            // Only full-document semantic token requests are
                            // supported; range requests are disabled.
                            range: Some(false),
                            full: Some(SemanticTokensFullOptions::Bool(true)),
                        },
                    ),
                ),
                inlay_hint_provider: Some(OneOf::Left(true)),
                ..Default::default()
            },
            server_info: Some(ServerInfo {
                name: "storybook-lsp".to_string(),
                version: Some(env!("CARGO_PKG_VERSION").to_string()),
            }),
        })
    }
    /// Log a greeting once the client confirms initialization.
    async fn initialized(&self, _params: InitializedParams) {
        self.client
            .log_message(MessageType::INFO, "Storybook LSP server initialized! 🎉")
            .await;
    }
    /// Nothing to clean up on shutdown.
    async fn shutdown(&self) -> Result<()> {
        Ok(())
    }
    /// Parse a newly opened document, publish its diagnostics, cache it,
    /// then refresh workspace-wide state.
    async fn did_open(&self, params: DidOpenTextDocumentParams) {
        let uri = params.text_document.uri;
        let text = params.text_document.text;
        // Parse and store document
        let doc = Document::new(text);
        // Publish diagnostics
        self.publish_diagnostics(&uri, &doc).await;
        // Store document
        self.documents.write().await.insert(uri, doc);
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }
    /// Re-parse on change. With FULL sync the first content change carries
    /// the entire new text, so only that change is consumed.
    async fn did_change(&self, params: DidChangeTextDocumentParams) {
        let uri = params.text_document.uri;
        if let Some(change) = params.content_changes.into_iter().next() {
            let text = change.text;
            // Update document
            let mut documents = self.documents.write().await;
            if let Some(doc) = documents.get_mut(&uri) {
                doc.update(text);
                self.publish_diagnostics(&uri, doc).await;
            }
            drop(documents); // Release lock before rebuilding
        }
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }
    /// Drop the closed document from the cache and refresh workspace state.
    async fn did_close(&self, params: DidCloseTextDocumentParams) {
        // Remove document from cache
        self.documents
            .write()
            .await
            .remove(&params.text_document.uri);
        // Rebuild workspace for cross-file features
        self.rebuild_workspace().await;
    }
    /// Hover: keyword documentation is tried first, then symbol-based hover.
    async fn hover(&self, params: HoverParams) -> Result<Option<Hover>> {
        let uri = params.text_document_position_params.text_document.uri;
        let position = params.text_document_position_params.position;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            // Try keyword hover first
            if let Some(hover) = hover::get_hover_info(
                &doc.text,
                position.line as usize,
                position.character as usize,
            ) {
                return Ok(Some(hover));
            }
            // Try semantic hover (symbols)
            if let Some(hover) = hover::get_semantic_hover_info(
                doc,
                position.line as usize,
                position.character as usize,
            ) {
                return Ok(Some(hover));
            }
        }
        Ok(None)
    }
    /// Delegate completion to the `completion` module for the cached doc.
    async fn completion(&self, params: CompletionParams) -> Result<Option<CompletionResponse>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(completion::get_completions(doc, &params));
        }
        Ok(None)
    }
    /// Build the outline view from the document's AST, if it parsed.
    async fn document_symbol(
        &self,
        params: DocumentSymbolParams,
    ) -> Result<Option<DocumentSymbolResponse>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            if let Some(ref ast) = doc.ast {
                let mut positions = doc.positions.clone();
                let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
                return Ok(Some(DocumentSymbolResponse::Nested(symbols)));
            }
        }
        Ok(None)
    }
    /// Delegate goto-definition to the `definition` module.
    async fn goto_definition(
        &self,
        params: GotoDefinitionParams,
    ) -> Result<Option<GotoDefinitionResponse>> {
        let uri = params
            .text_document_position_params
            .text_document
            .uri
            .clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(definition::get_definition(doc, &params, &uri));
        }
        Ok(None)
    }
    /// Delegate find-references to the `references` module.
    async fn references(&self, params: ReferenceParams) -> Result<Option<Vec<Location>>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(references::find_references(doc, &params, &uri));
        }
        Ok(None)
    }
    /// Delegate whole-document formatting to the `formatting` module.
    async fn formatting(&self, params: DocumentFormattingParams) -> Result<Option<Vec<TextEdit>>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            return Ok(formatting::format_document(doc, &params.options));
        }
        Ok(None)
    }
    /// Validate the rename target and return the renameable range, if any.
    async fn prepare_rename(
        &self,
        params: tower_lsp::lsp_types::TextDocumentPositionParams,
    ) -> Result<Option<tower_lsp::lsp_types::PrepareRenameResponse>> {
        let uri = params.text_document.uri;
        let position = params.position;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            if let Some(range) = rename::prepare_rename(doc, position) {
                return Ok(Some(tower_lsp::lsp_types::PrepareRenameResponse::Range(
                    range,
                )));
            }
        }
        Ok(None)
    }
    /// Workspace-wide rename: the full document map is handed to the
    /// `rename` module so edits can span files.
    async fn rename(&self, params: RenameParams) -> Result<Option<WorkspaceEdit>> {
        let uri = params.text_document_position.text_document.uri.clone();
        let documents = self.documents.read().await;
        // Pass all documents for workspace-wide rename
        Ok(rename::get_rename_edits(&documents, &params, &uri))
    }
    /// Delegate code actions to the `code_actions` module.
    async fn code_action(&self, params: CodeActionParams) -> Result<Option<CodeActionResponse>> {
        let documents = self.documents.read().await;
        // Generate code actions for the given position
        Ok(code_actions::get_code_actions(&documents, &params))
    }
    /// Full-document semantic tokens for the cached document.
    async fn semantic_tokens_full(
        &self,
        params: SemanticTokensParams,
    ) -> Result<Option<SemanticTokensResult>> {
        let uri = params.text_document.uri;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            Ok(semantic_tokens::get_semantic_tokens(doc))
        } else {
            Ok(None)
        }
    }
    /// Inlay hints limited to the requested range.
    async fn inlay_hint(&self, params: InlayHintParams) -> Result<Option<Vec<InlayHint>>> {
        let uri = params.text_document.uri;
        let range = params.range;
        let documents = self.documents.read().await;
        if let Some(doc) = documents.get(&uri) {
            Ok(inlay_hints::get_inlay_hints(doc, range.start, range.end))
        } else {
            Ok(None)
        }
    }
}

324
src/lsp/symbols.rs Normal file
View File

@@ -0,0 +1,324 @@
//! Document symbol extraction for outline view
use tower_lsp::lsp_types::{
DocumentSymbol,
Position,
Range,
SymbolKind,
};
use crate::{
position::PositionTracker,
syntax::ast::*,
};
/// Extract the document-outline symbol tree from a parsed file.
///
/// Declarations that produce no symbol (e.g. `use` statements) are skipped.
pub fn extract_symbols_from_ast(
    ast: &File,
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    ast.declarations
        .iter()
        .filter_map(|decl| extract_declaration_symbol(decl, positions))
        .collect()
}
/// Extract a symbol from a declaration
///
/// Returns `None` for `use` statements, which do not appear in the outline.
#[allow(deprecated)]
fn extract_declaration_symbol(
    decl: &Declaration,
    positions: &mut PositionTracker,
) -> Option<DocumentSymbol> {
    // Map each declaration kind to (name, LSP symbol kind, span, children).
    let (name, kind, span, children) = match decl {
        | Declaration::Character(c) => (
            c.name.clone(),
            SymbolKind::CLASS,
            c.span.clone(),
            extract_field_symbols(&c.fields, positions),
        ),
        | Declaration::Template(t) => (
            t.name.clone(),
            SymbolKind::INTERFACE,
            t.span.clone(),
            extract_field_symbols(&t.fields, positions),
        ),
        | Declaration::LifeArc(l) => (
            l.name.clone(),
            SymbolKind::FUNCTION,
            l.span.clone(),
            extract_state_symbols(&l.states, positions),
        ),
        | Declaration::Schedule(s) => (
            s.name.clone(),
            SymbolKind::EVENT,
            s.span.clone(),
            extract_block_symbols(&s.blocks, positions),
        ),
        | Declaration::Behavior(b) => (
            b.name.clone(),
            SymbolKind::MODULE,
            b.span.clone(),
            Vec::new(), // Behavior tree structure is too complex for symbol tree
        ),
        | Declaration::Institution(i) => (
            i.name.clone(),
            SymbolKind::MODULE,
            i.span.clone(),
            extract_field_symbols(&i.fields, positions),
        ),
        | Declaration::Relationship(r) => (
            r.name.clone(),
            SymbolKind::STRUCT,
            r.span.clone(),
            extract_field_symbols(&r.fields, positions),
        ),
        | Declaration::Location(l) => (
            l.name.clone(),
            SymbolKind::CONSTANT,
            l.span.clone(),
            extract_field_symbols(&l.fields, positions),
        ),
        | Declaration::Species(s) => (
            s.name.clone(),
            SymbolKind::CLASS,
            s.span.clone(),
            extract_field_symbols(&s.fields, positions),
        ),
        | Declaration::Enum(e) => (
            e.name.clone(),
            SymbolKind::ENUM,
            e.span.clone(),
            extract_variant_symbols(&e.variants, positions),
        ),
        | Declaration::Use(_) => return None, // Use statements don't create symbols
    };
    let (start_line, start_col) = positions.offset_to_position(span.start);
    let (end_line, end_col) = positions.offset_to_position(span.end);
    // Selection range is just the name
    // NOTE(review): this assumes the declaration name begins exactly at
    // span.start; if spans start at the keyword (e.g. "character"), the
    // selection range covers a keyword-length prefix instead of the name —
    // confirm the parser's span semantics.
    let name_end_offset = span.start + name.len();
    let (name_end_line, name_end_col) = positions.offset_to_position(name_end_offset);
    Some(DocumentSymbol {
        name: name.clone(),
        detail: None,
        kind,
        tags: None,
        deprecated: None,
        range: Range {
            start: Position {
                line: start_line as u32,
                character: start_col as u32,
            },
            end: Position {
                line: end_line as u32,
                character: end_col as u32,
            },
        },
        selection_range: Range {
            start: Position {
                line: start_line as u32,
                character: start_col as u32,
            },
            end: Position {
                line: name_end_line as u32,
                character: name_end_col as u32,
            },
        },
        children: if children.is_empty() {
            None
        } else {
            Some(children)
        },
    })
}
/// Extract symbols from field declarations
///
/// Each field becomes a FIELD symbol whose full range covers the whole
/// field and whose selection range stops at the end of the field name.
#[allow(deprecated)]
fn extract_field_symbols(fields: &[Field], positions: &mut PositionTracker) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(fields.len());
    for field in fields {
        let (start_line, start_col) = positions.offset_to_position(field.span.start);
        let (end_line, end_col) = positions.offset_to_position(field.span.end);
        let (name_end_line, name_end_col) =
            positions.offset_to_position(field.span.start + field.name.len());
        let start = Position {
            line: start_line as u32,
            character: start_col as u32,
        };
        symbols.push(DocumentSymbol {
            name: field.name.clone(),
            detail: None,
            kind: SymbolKind::FIELD,
            tags: None,
            deprecated: None,
            range: Range {
                start,
                end: Position {
                    line: end_line as u32,
                    character: end_col as u32,
                },
            },
            selection_range: Range {
                start,
                end: Position {
                    line: name_end_line as u32,
                    character: name_end_col as u32,
                },
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from life arc states
///
/// Each state becomes a PROPERTY symbol carrying the detail text "state";
/// the selection range stops at the end of the state name.
#[allow(deprecated)]
fn extract_state_symbols(
    states: &[ArcState],
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(states.len());
    for state in states {
        let (start_line, start_col) = positions.offset_to_position(state.span.start);
        let (end_line, end_col) = positions.offset_to_position(state.span.end);
        let (name_end_line, name_end_col) =
            positions.offset_to_position(state.span.start + state.name.len());
        let start = Position {
            line: start_line as u32,
            character: start_col as u32,
        };
        symbols.push(DocumentSymbol {
            name: state.name.clone(),
            detail: Some("state".to_string()),
            kind: SymbolKind::PROPERTY,
            tags: None,
            deprecated: None,
            range: Range {
                start,
                end: Position {
                    line: end_line as u32,
                    character: end_col as u32,
                },
            },
            selection_range: Range {
                start,
                end: Position {
                    line: name_end_line as u32,
                    character: name_end_col as u32,
                },
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from schedule blocks
///
/// A block is named after its activity and annotated with its
/// "HH:MM-HH:MM" time window as the detail text.
#[allow(deprecated)]
fn extract_block_symbols(
    blocks: &[ScheduleBlock],
    positions: &mut PositionTracker,
) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(blocks.len());
    for block in blocks {
        let (start_line, start_col) = positions.offset_to_position(block.span.start);
        let (end_line, end_col) = positions.offset_to_position(block.span.end);
        // For blocks, the "name" is the activity.
        let name = block.activity.clone();
        let (name_end_line, name_end_col) =
            positions.offset_to_position(block.span.start + name.len());
        let start = Position {
            line: start_line as u32,
            character: start_col as u32,
        };
        let detail = format!(
            "{:02}:{:02}-{:02}:{:02}",
            block.start.hour, block.start.minute, block.end.hour, block.end.minute
        );
        symbols.push(DocumentSymbol {
            name,
            detail: Some(detail),
            kind: SymbolKind::PROPERTY,
            tags: None,
            deprecated: None,
            range: Range {
                start,
                end: Position {
                    line: end_line as u32,
                    character: end_col as u32,
                },
            },
            selection_range: Range {
                start,
                end: Position {
                    line: name_end_line as u32,
                    character: name_end_col as u32,
                },
            },
            children: None,
        });
    }
    symbols
}
/// Extract symbols from enum variants (simple string list)
///
/// The parser keeps variants as plain strings without spans, so each variant
/// gets a synthetic one-variant-per-line range (line = variant index); real
/// positions would require the parser to track variant spans.
#[allow(deprecated)]
fn extract_variant_symbols(
    variants: &[String],
    _positions: &PositionTracker,
) -> Vec<DocumentSymbol> {
    let mut symbols = Vec::with_capacity(variants.len());
    for (i, variant) in variants.iter().enumerate() {
        let synthetic = Range {
            start: Position {
                line: i as u32,
                character: 0,
            },
            end: Position {
                line: i as u32,
                character: variant.len() as u32,
            },
        };
        symbols.push(DocumentSymbol {
            name: variant.clone(),
            detail: None,
            kind: SymbolKind::ENUM_MEMBER,
            tags: None,
            deprecated: None,
            range: synthetic,
            selection_range: synthetic,
            children: None,
        });
    }
    symbols
}

623
src/lsp/tests.rs Normal file
View File

@@ -0,0 +1,623 @@
//! Comprehensive test suite for LSP server functionality
use document::Document;
use tower_lsp::lsp_types::*;
use super::*;
// Test data fixtures
/// Shared fixture exercising every declaration kind the LSP handles:
/// species, enum, characters (with a template and a prose block), a
/// template, life_arc states, a schedule, and a relationship.
const SAMPLE_STORYBOOK: &str = r#"
species Human {
    intelligence: high
    lifespan: 80
}
enum Mood {
    Happy,
    Sad,
    Angry
}
character Alice: Human {
    age: 7
    mood: Happy
    ---backstory
    A curious girl who loves adventures
    ---
}
template Child {
    age: 5..12
    guardian: Human
}
character Bob: Human from Child {
    age: 10
    guardian: Alice
}
life_arc Growing {
    state child {
        on enter {
            age: 5
        }
    }
    state teen {
        on enter {
            age: 13
        }
    }
    state adult {
        on enter {
            age: 18
        }
    }
}
schedule DailyRoutine {
    08:00 -> 09:00: breakfast {
        activity: eating
    }
    09:00 -> 12:00: school {
        activity: learning
    }
}
relationship Friendship {
    Alice as friend {
        bond_strength: 5
    }
    Bob as friend {
        bond_strength: 5
    }
}
"#;
#[cfg(test)]
mod document_tests {
    use super::*;
    /// The full sample fixture must parse into an AST without errors.
    #[test]
    fn test_document_creation() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        if !doc.parse_errors.is_empty() {
            eprintln!("Parse errors:");
            for err in &doc.parse_errors {
                eprintln!(" - {}", err.message);
            }
        }
        assert_eq!(doc.text, SAMPLE_STORYBOOK);
        assert!(doc.ast.is_some(), "AST should be parsed");
    }
    /// Broken syntax must yield no AST and at least one parse error.
    #[test]
    fn test_document_with_errors() {
        let invalid = "character { invalid syntax }";
        let doc = Document::new(invalid.to_string());
        assert!(doc.ast.is_none(), "Invalid syntax should not produce AST");
        assert!(!doc.parse_errors.is_empty(), "Should have parse errors");
    }
    /// Updating a document replaces both its text and its name table.
    #[test]
    fn test_document_update() {
        let mut doc = Document::new("character Alice {}".to_string());
        doc.update("character Bob {}".to_string());
        assert_eq!(doc.text, "character Bob {}");
        assert!(doc.name_table.resolve_name("Bob").is_some());
        assert!(doc.name_table.resolve_name("Alice").is_none());
    }
    /// Every top-level declaration in the fixture is resolvable by name.
    #[test]
    fn test_symbol_extraction() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        assert!(doc.name_table.resolve_name("Alice").is_some());
        assert!(doc.name_table.resolve_name("Bob").is_some());
        assert!(doc.name_table.resolve_name("Child").is_some());
        assert!(doc.name_table.resolve_name("Growing").is_some());
        assert!(doc.name_table.resolve_name("DailyRoutine").is_some());
        assert!(doc.name_table.resolve_name("Human").is_some());
        assert!(doc.name_table.resolve_name("Mood").is_some());
        assert!(doc.name_table.resolve_name("Friendship").is_some());
    }
    /// Resolved names carry the declaration kind they came from.
    #[test]
    fn test_symbol_kinds() {
        use crate::resolve::names::DeclKind;
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let alice = doc.name_table.resolve_name("Alice").unwrap();
        assert_eq!(alice.kind, DeclKind::Character);
        let child = doc.name_table.resolve_name("Child").unwrap();
        assert_eq!(child.kind, DeclKind::Template);
        let growing = doc.name_table.resolve_name("Growing").unwrap();
        assert_eq!(growing.kind, DeclKind::LifeArc);
    }
    /// word_at_offset returns the word under an offset, None on whitespace.
    #[test]
    fn test_word_at_offset() {
        let doc = Document::new("character Alice {}".to_string());
        // Test finding "character" keyword
        let word = doc.word_at_offset(5);
        assert_eq!(word, Some("character".to_string()));
        // Test finding "Alice" identifier
        let word = doc.word_at_offset(12);
        assert_eq!(word, Some("Alice".to_string()));
        // Test whitespace returns None
        let word = doc.word_at_offset(9);
        assert_eq!(word, None);
    }
}
#[cfg(test)]
mod position_tests {
    use crate::position::PositionTracker;
    /// Offsets within one line map to columns on line 0.
    #[test]
    fn test_position_tracking_single_line() {
        let mut tracker = PositionTracker::new("hello world");
        assert_eq!(tracker.offset_to_position(0), (0, 0));
        assert_eq!(tracker.offset_to_position(6), (0, 6));
        assert_eq!(tracker.offset_to_position(11), (0, 11));
    }
    /// Offsets just past each '\n' land at column 0 of the next line.
    #[test]
    fn test_position_tracking_multiline() {
        let mut tracker = PositionTracker::new("line 1\nline 2\nline 3");
        // Start of first line
        assert_eq!(tracker.offset_to_position(0), (0, 0));
        // Start of second line (after \n at offset 6)
        assert_eq!(tracker.offset_to_position(7), (1, 0));
        // Start of third line (after \n at offset 13)
        assert_eq!(tracker.offset_to_position(14), (2, 0));
        // Middle of second line
        assert_eq!(tracker.offset_to_position(10), (1, 3));
    }
    /// Three newline-separated lines count as three lines.
    #[test]
    fn test_line_count() {
        let tracker = PositionTracker::new("line 1\nline 2\nline 3");
        assert_eq!(tracker.line_count(), 3);
    }
    /// line_offset yields each line's starting byte offset; None past EOF.
    #[test]
    fn test_line_offset() {
        let tracker = PositionTracker::new("line 1\nline 2\nline 3");
        assert_eq!(tracker.line_offset(0), Some(0));
        assert_eq!(tracker.line_offset(1), Some(7));
        assert_eq!(tracker.line_offset(2), Some(14));
        assert_eq!(tracker.line_offset(3), None);
    }
}
#[cfg(test)]
mod hover_tests {
    use super::*;
    /// Hovering over language keywords returns documentation markup.
    #[test]
    fn test_hover_keywords() {
        // Test character keyword
        let hover = hover::get_hover_info("character Alice {}", 0, 5);
        assert!(hover.is_some());
        let hover = hover.unwrap();
        if let HoverContents::Markup(content) = hover.contents {
            assert!(content.value.contains("character"));
            assert!(content.value.contains("Defines a character entity"));
        }
        // Test template keyword
        let hover = hover::get_hover_info("template Child {}", 0, 2);
        assert!(hover.is_some());
        // Test life_arc keyword
        let hover = hover::get_hover_info("life_arc Growing {}", 0, 5);
        assert!(hover.is_some());
    }
    /// Hovering over a plain identifier yields no keyword hover.
    #[test]
    fn test_hover_non_keyword() {
        let hover = hover::get_hover_info("character Alice {}", 0, 12);
        assert!(hover.is_none());
    }
    /// Hovering past the end of the line yields nothing.
    #[test]
    fn test_hover_invalid_position() {
        let hover = hover::get_hover_info("character Alice {}", 0, 100);
        assert!(hover.is_none());
    }
}
#[cfg(test)]
mod completion_tests {
    use super::*;
    /// Completions include both language keywords and entities declared in
    /// the current document.
    #[test]
    fn test_keyword_completions() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let params = CompletionParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position {
                    line: 0,
                    character: 0,
                },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: None,
        };
        let result = completion::get_completions(&doc, &params);
        assert!(result.is_some());
        if let Some(CompletionResponse::Array(items)) = result {
            // Should have keyword completions
            assert!(items.iter().any(|item| item.label == "character"));
            assert!(items.iter().any(|item| item.label == "template"));
            assert!(items.iter().any(|item| item.label == "life_arc"));
            // Should have entity completions from document
            assert!(items.iter().any(|item| item.label == "Alice"));
            assert!(items.iter().any(|item| item.label == "Bob"));
        }
    }
    /// Keyword completions come with snippet insert text.
    #[test]
    fn test_completion_includes_snippets() {
        let doc = Document::new("".to_string());
        let params = CompletionParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position::default(),
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: None,
        };
        let result = completion::get_completions(&doc, &params);
        if let Some(CompletionResponse::Array(items)) = result {
            // Check that character completion has a snippet
            let character_item = items.iter().find(|item| item.label == "character");
            assert!(character_item.is_some());
            let character_item = character_item.unwrap();
            assert!(character_item.insert_text.is_some());
            assert_eq!(
                character_item.insert_text_format,
                Some(InsertTextFormat::SNIPPET)
            );
        }
    }
}
#[cfg(test)]
mod formatting_tests {
    use super::*;
    /// A squashed one-liner is formatted into a single whole-document edit.
    #[test]
    fn test_basic_formatting() {
        let doc = Document::new("character Alice{age:7}".to_string());
        let options = FormattingOptions {
            tab_size: 4,
            insert_spaces: true,
            ..Default::default()
        };
        let result = formatting::format_document(&doc, &options);
        assert!(result.is_some());
        let edits = result.unwrap();
        assert_eq!(edits.len(), 1);
        let formatted = &edits[0].new_text;
        eprintln!("Formatted output:\n{}", formatted);
        assert!(formatted.contains("character Alice {") || formatted.contains("character Alice{"));
        assert!(formatted.contains("age: 7") || formatted.contains("age:7"));
    }
    /// Body fields are indented according to the tab_size option.
    #[test]
    fn test_formatting_indentation() {
        let doc = Document::new("character Alice {\nage: 7\n}".to_string());
        let options = FormattingOptions {
            tab_size: 4,
            insert_spaces: true,
            ..Default::default()
        };
        let result = formatting::format_document(&doc, &options);
        assert!(result.is_some());
        let formatted = &result.unwrap()[0].new_text;
        // Check that age is indented with 4 spaces
        assert!(formatted.contains("    age: 7"));
    }
    /// Prose block content must survive formatting byte-for-byte.
    #[test]
    fn test_formatting_preserves_prose() {
        let doc = Document::new(
            "character Alice {\n---backstory\nSome irregular spacing\n---\n}".to_string(),
        );
        let options = FormattingOptions::default();
        let result = formatting::format_document(&doc, &options);
        let formatted = &result.unwrap()[0].new_text;
        // Prose content should be preserved exactly
        assert!(formatted.contains("Some irregular spacing"));
    }
    /// Already-formatted input produces no edits at all.
    #[test]
    fn test_no_formatting_needed() {
        let already_formatted = "character Alice {\n    age: 7\n}\n";
        let doc = Document::new(already_formatted.to_string());
        let options = FormattingOptions::default();
        let result = formatting::format_document(&doc, &options);
        // Should return None if no changes needed
        assert!(result.is_none());
    }
}
#[cfg(test)]
mod symbols_tests {
    use super::*;

    /// Every top-level declaration in the sample document must appear in the
    /// extracted symbol list.
    #[test]
    fn test_extract_symbols_from_ast() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        let expected = [
            "Alice",
            "Child",
            "Bob",
            "Growing",
            "DailyRoutine",
            "Human",
            "Mood",
            "Friendship",
        ];
        for name in expected {
            assert!(symbols.iter().any(|sym| sym.name == name));
        }
    }

    /// A character's fields are exposed as child symbols of its declaration.
    #[test]
    fn test_symbol_hierarchy() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        let alice = symbols.iter().find(|sym| sym.name == "Alice").unwrap();
        assert!(alice.children.is_some());
        let fields = alice.children.as_ref().unwrap();
        for field in ["age", "backstory"] {
            assert!(fields.iter().any(|child| child.name == field));
        }
    }

    /// Each declaration maps to the expected LSP `SymbolKind`
    /// (Alice -> CLASS, Child -> INTERFACE, Mood -> ENUM).
    #[test]
    fn test_symbol_kinds() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        let cases = [
            ("Alice", SymbolKind::CLASS),
            ("Child", SymbolKind::INTERFACE),
            ("Mood", SymbolKind::ENUM),
        ];
        for (name, kind) in cases {
            let sym = symbols.iter().find(|s| s.name == name).unwrap();
            assert_eq!(sym.kind, kind);
        }
    }

    /// The states of a life arc appear as children of the life-arc symbol.
    #[test]
    fn test_life_arc_states() {
        let doc = Document::new(SAMPLE_STORYBOOK.to_string());
        let ast = doc.ast.as_ref().unwrap();
        let mut positions = doc.positions.clone();
        let symbols = symbols::extract_symbols_from_ast(ast, &mut positions);
        let growing = symbols.iter().find(|sym| sym.name == "Growing").unwrap();
        assert!(growing.children.is_some());
        let states = growing.children.as_ref().unwrap();
        for state in ["child", "teen", "adult"] {
            assert!(states.iter().any(|s| s.name == state));
        }
    }
}
#[cfg(test)]
mod definition_tests {
    use super::*;

    /// Builds goto-definition params for the test URI at the given position.
    fn params_at(line: u32, character: u32) -> GotoDefinitionParams {
        GotoDefinitionParams {
            text_document_position_params: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position { line, character },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
        }
    }

    /// Goto-definition from the declared name in `character Alice` resolves.
    #[test]
    fn test_goto_definition_character() {
        let mut doc = Document::new(SAMPLE_STORYBOOK.to_string());
        // Position the cursor on "Alice" in "character Alice".
        let alice_offset = doc.text.find("character Alice").unwrap() + "character ".len();
        let (line, col) = doc.positions.offset_to_position(alice_offset);
        let params = params_at(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        assert!(definition::get_definition(&mut doc, &params, &uri).is_some());
    }

    /// A position on the `character` keyword is not a symbol reference, so
    /// no definition is found.
    #[test]
    fn test_goto_definition_not_found() {
        let mut doc = Document::new("character Alice {}".to_string());
        let params = params_at(0, 0);
        let uri = Url::parse("file:///test.sb").unwrap();
        assert!(definition::get_definition(&mut doc, &params, &uri).is_none());
    }
}
#[cfg(test)]
mod references_tests {
    use super::*;

    /// Builds a reference request at `(line, character)` for the test URI,
    /// asking for the declaration to be included in the results.
    fn reference_params(line: u32, character: u32) -> ReferenceParams {
        ReferenceParams {
            text_document_position: TextDocumentPositionParams {
                text_document: TextDocumentIdentifier {
                    uri: Url::parse("file:///test.sb").unwrap(),
                },
                position: Position { line, character },
            },
            work_done_progress_params: WorkDoneProgressParams::default(),
            partial_result_params: PartialResultParams::default(),
            context: ReferenceContext {
                include_declaration: true,
            },
        }
    }

    /// Both the declaration of `Alice` and its use in `friend: Alice` are
    /// reported.
    #[test]
    fn test_find_references() {
        let source = "character Alice {}\ncharacter Bob { friend: Alice }";
        let mut doc = Document::new(source.to_string());
        // Cursor on the first occurrence of "Alice" (the declaration).
        let (line, col) = doc
            .positions
            .offset_to_position(source.find("Alice").unwrap());
        let params = reference_params(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        let locations = references::find_references(&mut doc, &params, &uri)
            .expect("reference lookup should return locations");
        assert_eq!(locations.len(), 2);
    }

    /// Matching respects word boundaries: `Alice` must not match `Alicia`.
    #[test]
    fn test_find_references_word_boundaries() {
        let source = "character Alice {}\ncharacter Alicia {}";
        let mut doc = Document::new(source.to_string());
        let (line, col) = doc
            .positions
            .offset_to_position(source.find("Alice").unwrap());
        let params = reference_params(line as u32, col as u32);
        let uri = Url::parse("file:///test.sb").unwrap();
        let locations = references::find_references(&mut doc, &params, &uri).unwrap();
        assert_eq!(locations.len(), 1);
    }
}
#[cfg(test)]
mod integration_tests {
    use super::*;

    /// End-to-end pipeline: parse the sample, extract names, then replace the
    /// text and confirm the whole pipeline re-ran.
    #[test]
    fn test_full_workflow() {
        let mut doc = Document::new(SAMPLE_STORYBOOK.to_string());
        // Parsing succeeded with no diagnostics.
        assert!(doc.ast.is_some());
        assert!(doc.parse_errors.is_empty());
        // The name table was populated from the parsed declarations.
        assert!(doc.name_table.all_entries().count() > 0);
        // Updating the text re-parses and re-resolves the document.
        doc.update("character NewChar {}".to_string());
        assert!(doc.name_table.resolve_name("NewChar").is_some());
        assert!(doc.ast.is_some());
    }

    /// Malformed input is handled gracefully: parse errors are reported, no
    /// AST is produced, and no symbols leak into the name table.
    #[test]
    fn test_error_recovery() {
        let doc = Document::new("character { invalid }".to_string());
        assert!(doc.ast.is_none());
        assert!(!doc.parse_errors.is_empty());
        assert_eq!(doc.name_table.all_entries().count(), 0);
    }
}

206
src/lsp/validation_tests.rs Normal file
View File

@@ -0,0 +1,206 @@
//! Tests for semantic validation integration
#[cfg(test)]
mod tests {
use crate::lsp::document::Document;
/// Reserved keywords used as field names are rejected at parse time, so the
/// diagnostic lands in `parse_errors` rather than `resolve_errors`.
#[test]
fn test_reserved_keyword_caught_by_parser() {
// Reserved keywords are caught by the parser, not the validator
// This test verifies that parse errors catch reserved keywords
let source = r#"
character Alice {
self: "Bad field name"
}
"#;
let doc = Document::new(source.to_string());
// Should have parse error for reserved keyword
assert!(
!doc.parse_errors.is_empty(),
"Parser should catch reserved keyword 'self' as field name"
);
}
/// A well-formed character with ordinary fields validates with no errors.
#[test]
fn test_valid_fields_no_validation_errors() {
let source = r#"
character Alice {
age: 7
name: "Alice"
}
"#;
let doc = Document::new(source.to_string());
// Should have no validation errors
assert!(
doc.resolve_errors.is_empty(),
"Valid code should have no validation errors"
);
}
/// Trait values outside [0.0, 1.0] are flagged by semantic validation.
#[test]
fn test_trait_range_validation() {
let source = r#"
character Alice {
bond: 1.5
}
"#;
let doc = Document::new(source.to_string());
// Should have error for bond value out of range
assert!(
!doc.resolve_errors.is_empty(),
"Should detect bond value out of range [0.0, 1.0]"
);
let error_message = format!("{}", doc.resolve_errors[0]);
// The diagnostic should reference the offending value or the legal range.
assert!(
error_message.contains("1.5") || error_message.contains("range"),
"Error should mention the value or range: {}",
error_message
);
}
/// The boundary values 0.0 and 1.0, and values between them, are accepted.
#[test]
fn test_valid_trait_ranges() {
let source = r#"
character Alice {
bond: 0.75
trust: 0.0
love: 1.0
}
"#;
let doc = Document::new(source.to_string());
// Should have no validation errors for valid trait ranges
assert!(
doc.resolve_errors.is_empty(),
"Valid trait values should produce no errors"
);
}
/// A life-arc transition targeting a state that was never declared
/// (`adult` here) is a validation error.
#[test]
fn test_life_arc_transition_validation() {
let source = r#"
life_arc Growing {
state child {
on birthday -> adult
}
}
"#;
let doc = Document::new(source.to_string());
// Should have error for transition to unknown state 'adult'
assert!(
!doc.resolve_errors.is_empty(),
"Should detect transition to undefined state"
);
let error_message = format!("{}", doc.resolve_errors[0]);
assert!(
error_message.contains("adult") || error_message.contains("unknown"),
"Error should mention unknown state"
);
}
/// A life arc whose transitions all target declared states validates cleanly.
#[test]
fn test_valid_life_arc_transitions() {
let source = r#"
life_arc Growing {
state child {
on birthday -> teen
}
state teen {
on birthday -> adult
}
state adult {}
}
"#;
let doc = Document::new(source.to_string());
// Should have no validation errors
assert!(
doc.resolve_errors.is_empty(),
"Valid life arc should produce no errors"
);
}
/// Overlapping time blocks (09:00-11:00 intersects 08:00-10:00) are flagged.
#[test]
fn test_schedule_overlap_validation() {
let source = r#"
schedule Daily {
08:00 -> 10:00: morning {}
09:00 -> 11:00: overlap {}
}
"#;
let doc = Document::new(source.to_string());
// Should have error for overlapping schedule blocks
assert!(
!doc.resolve_errors.is_empty(),
"Should detect overlapping schedule blocks"
);
let error_message = format!("{}", doc.resolve_errors[0]);
assert!(
error_message.contains("overlap"),
"Error should mention overlap"
);
}
/// Back-to-back blocks that only share an endpoint (10:00, 12:00) do not
/// count as overlapping.
#[test]
fn test_valid_schedule_no_overlaps() {
let source = r#"
schedule Daily {
08:00 -> 10:00: morning {}
10:00 -> 12:00: midday {}
12:00 -> 14:00: afternoon {}
}
"#;
let doc = Document::new(source.to_string());
// Should have no validation errors
assert!(
doc.resolve_errors.is_empty(),
"Non-overlapping schedule should produce no errors"
);
}
/// Each out-of-range trait value yields its own diagnostic, so two bad
/// values produce at least two errors.
#[test]
fn test_multiple_validation_errors() {
// Test multiple validation errors at once
let source = r#"
character Alice {
bond: 2.0
trust: -0.5
}
"#;
let doc = Document::new(source.to_string());
// Should have multiple errors for out-of-range values
assert!(
doc.resolve_errors.len() >= 2,
"Should detect multiple range errors. Got {} errors",
doc.resolve_errors.len()
);
}
/// When the source fails to parse, validation may not run at all, so either
/// error list being non-empty is acceptable here.
#[test]
fn test_parse_and_validation_errors_separate() {
let source = r#"
character Alice {
character: "Reserved"
invalid syntax here
}
"#;
let doc = Document::new(source.to_string());
// Parse should fail, so we won't have validation errors
// (validation only runs on successfully parsed AST)
assert!(
!doc.parse_errors.is_empty() || !doc.resolve_errors.is_empty(),
"Should have either parse or validation errors"
);
}
}