mistralai-client-rs/tests/v1_client_chat_async_test.rs
Commit 79bc40bb15 · Sienna Meridian Satterwhite · 2026-03-20 17:16:26 +00:00
Update to latest Mistral AI API (v1.0.0)
- Replace closed Model enum with flexible string-based Model type
  with constructor methods for all current models (Mistral Large 3,
  Small 4, Magistral, Codestral, Devstral, Pixtral, Voxtral, etc.);
  see the sketch after this list
- Add new API endpoints: FIM completions, Files, Fine-tuning, Batch
  jobs, OCR, Audio transcription, Moderations/Classifications, and
  Agent completions (sync + async for all)
- Add new chat fields: frequency_penalty, presence_penalty, stop,
  n, parallel_tool_calls, reasoning_effort, min_tokens, json_schema
  response format
- Add embedding fields: output_dimension, output_dtype
- Tool parameters now accept raw JSON Schema (serde_json::Value)
  instead of limited enum types
- Add tool call IDs and Required tool choice variant
- Add DELETE HTTP method support and multipart file upload
- Bump thiserror to v2, add reqwest multipart feature
- Remove strum dependency (no longer needed)
- Update all tests and examples for new API
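
As a rough sketch of the surface this commit describes: Model keeps named constructors for current models but is no longer a closed enum, and ChatParams gains the new sampling fields. The constructor and field names below are taken from the commit message and the test file that follows; the concrete field types (Option<f32>, Option<u32>, Option<Vec<String>>) are assumptions about the crate, not confirmed by it.

use mistralai_client::v1::{chat::ChatParams, constants::Model};

fn sketch_new_chat_params() -> ChatParams {
    // Named constructors replace the old closed enum variants; the test
    // file below exercises mistral_small_latest().
    let _model: Model = Model::mistral_small_latest();

    ChatParams {
        // Sampling fields added by this commit. The field names come from
        // the commit message; the types used here are assumptions.
        frequency_penalty: Some(0.2),
        presence_penalty: Some(0.2),
        n: Some(1),
        min_tokens: Some(1),
        stop: Some(vec!["\n\n".to_string()]),
        ..Default::default()
    }
}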


use jrest::expect;
use mistralai_client::v1::{
    chat::{ChatMessage, ChatMessageRole, ChatParams, ChatResponseChoiceFinishReason},
    client::Client,
    constants::Model,
    tool::{Tool, ToolChoice},
};

mod setup;
#[tokio::test]
async fn test_client_chat_async() {
    setup::setup();

    // Credentials and endpoint come from the environment (see `setup`).
    let client = Client::new(None, None, None, None).unwrap();
    let model = Model::mistral_small_latest();
    let messages = vec![ChatMessage::new_user_message(
        "Guess the next word: \"Eiffel ...\"?",
    )];
    // Zero temperature plus a fixed seed keeps the completion deterministic.
    let options = ChatParams {
        temperature: Some(0.0),
        random_seed: Some(42),
        ..Default::default()
    };

    let response = client
        .chat_async(model, messages, Some(options))
        .await
        .unwrap();

    expect!(response.object).to_be("chat.completion".to_string());
    expect!(response.choices.len()).to_be(1);
    expect!(response.choices[0].index).to_be(0);
    expect!(response.choices[0].finish_reason.clone()).to_be(ChatResponseChoiceFinishReason::Stop);
    expect!(response.choices[0].message.role.clone()).to_be(ChatMessageRole::Assistant);
    expect!(response.choices[0]
        .message
        .content
        .clone()
        .contains("Tower"))
    .to_be(true);
    expect!(response.usage.prompt_tokens).to_be_greater_than(0);
    expect!(response.usage.completion_tokens).to_be_greater_than(0);
    expect!(response.usage.total_tokens).to_be_greater_than(0);
}
#[tokio::test]
async fn test_client_chat_async_with_function_calling() {
    setup::setup();

    // Tool parameters are now raw JSON Schema (serde_json::Value) rather
    // than the old closed enum types.
    let tools = vec![Tool::new(
        "get_city_temperature".to_string(),
        "Get the current temperature in a city.".to_string(),
        serde_json::json!({
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The name of the city."
                }
            },
            "required": ["city"]
        }),
    )];

    let client = Client::new(None, None, None, None).unwrap();
    let model = Model::mistral_small_latest();
    let messages = vec![ChatMessage::new_user_message(
        "What's the current temperature in Paris?",
    )];
    // `ToolChoice::Any` forces the model to call one of the provided tools.
    let options = ChatParams {
        temperature: Some(0.0),
        random_seed: Some(42),
        tool_choice: Some(ToolChoice::Any),
        tools: Some(tools),
        ..Default::default()
    };

    let response = client
        .chat_async(model, messages, Some(options))
        .await
        .unwrap();

    expect!(response.object).to_be("chat.completion".to_string());
    expect!(response.choices.len()).to_be(1);
    expect!(response.choices[0].index).to_be(0);
    // A forced tool call finishes with `ToolCalls` instead of `Stop`.
    expect!(response.choices[0].finish_reason.clone())
        .to_be(ChatResponseChoiceFinishReason::ToolCalls);
    expect!(response.choices[0].message.role.clone()).to_be(ChatMessageRole::Assistant);
    expect!(response.usage.prompt_tokens).to_be_greater_than(0);
    expect!(response.usage.completion_tokens).to_be_greater_than(0);
    expect!(response.usage.total_tokens).to_be_greater_than(0);
}
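
The second test stops at asserting the ToolCalls finish reason; a caller would next read the call back out of the assistant message. The sketch below shows one hedged way that might look: the tool_calls field and the id/function members on each call are assumptions inferred from the commit message's "tool call IDs" bullet, and are not asserted by the test above.

// Hedged sketch: reading the forced tool call back out of the message.
// The shape of `tool_calls`, `id`, and `function` is assumed here, not
// taken from the crate.
fn sketch_handle_tool_calls(message: &ChatMessage) {
    if let Some(calls) = &message.tool_calls {
        for call in calls {
            println!(
                "tool call {}: {}({})",
                call.id, call.function.name, call.function.arguments
            );
        }
    }
}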