Update to latest Mistral AI API (v1.0.0)

- Replace the closed Model enum with a flexible string-based Model type
  offering constructor methods for all current models (Mistral Large 3,
  Small 4, Magistral, Codestral, Devstral, Pixtral, Voxtral, etc.);
  see the first sketch after this list
- Add new API endpoints: FIM completions, Files, Fine-tuning, Batch
  jobs, OCR, Audio transcription, Moderations/Classifications, and
  Agent completions (sync + async for all)
- Add new chat fields: frequency_penalty, presence_penalty, stop,
  n, parallel_tool_calls, reasoning_effort, min_tokens, json_schema
  response format
- Add embedding fields: output_dimension, output_dtype
- Tool parameters now accept raw JSON Schema (serde_json::Value)
  instead of limited enum types (see the second sketch below)
- Add tool call IDs and Required tool choice variant
- Add DELETE HTTP method support and multipart file upload
- Bump thiserror to v2, add reqwest multipart feature
- Remove strum dependency (no longer needed)
- Update all tests and examples for new API
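
As a rough illustration of the Model and chat changes, here is a minimal
sketch pieced together from this changelog and the test diff below. The
imports, Client::new, ChatParams, chat_stream, and the chunk shapes all
appear in the diff; the #[tokio::main] wrapper and the behavior of passing
None to Client::new are assumptions, not confirmed API.

// Minimal sketch, not a verbatim example from the repo.
use futures::stream::StreamExt;
use mistralai_client::v1::{
    chat::{ChatMessage, ChatParams},
    client::Client,
    constants::Model,
};

#[tokio::main]
async fn main() {
    // Passing None for every argument presumably falls back to defaults
    // such as an API key from the environment (assumption).
    let client = Client::new(None, None, None, None).unwrap();

    // Model is now an open, string-based type with constructor methods,
    // replacing closed enum variants such as Model::OpenMistral7b.
    let model = Model::mistral_small_latest();

    let messages = vec![ChatMessage::new_user_message("Say hello.")];
    let params = ChatParams {
        random_seed: Some(42),
        // The new fields listed above (frequency_penalty, presence_penalty,
        // stop, n, ...) would be set here; their exact types are not shown
        // in this commit, so they are left at their defaults.
        ..Default::default()
    };

    let stream = client
        .chat_stream(model, messages, Some(params))
        .await
        .expect("failed to create stream");

    // Stream items are Result-wrapped batches of chunks (see the test
    // rewrite below); print each delta's content as it arrives.
    stream
        .for_each(|chunk_result| async {
            match chunk_result {
                Ok(chunks) => {
                    for chunk in &chunks {
                        if let Some(content) = &chunk.choices[0].delta.content {
                            print!("{}", content);
                        }
                    }
                }
                Err(error) => eprintln!("Error: {:?}", error),
            }
        })
        .await;
}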
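
A second sketch for the tool-parameters change: with parameters typed as
serde_json::Value, any JSON Schema can be passed through unchanged. The
"get_weather"-style schema below is invented for illustration, and the
crate's surrounding tool-definition types are omitted because their names
are not shown in this commit.

use serde_json::json;

fn main() {
    // Arbitrary JSON Schema for a hypothetical weather-lookup tool; under
    // the old API this had to be expressed through limited enum types.
    let parameters = json!({
        "type": "object",
        "properties": {
            "city": { "type": "string", "description": "City to look up" }
        },
        "required": ["city"]
    });

    // This Value would be handed to the crate's tool/function definition.
    println!("{}", parameters);
}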
2026-03-20 17:16:26 +00:00
parent 9ad6a1dc84
commit 79bc40bb15
33 changed files with 1977 additions and 622 deletions

@@ -1,16 +1,18 @@
 // Streaming tests require a live API key and are not run in CI.
+// Uncomment to test locally.
 // use futures::stream::StreamExt;
 // use jrest::expect;
 // use mistralai_client::v1::{
-//     chat_completion::{ChatParams, ChatMessage, ChatMessageRole},
+//     chat::{ChatMessage, ChatParams},
 //     client::Client,
 //     constants::Model,
 // };
 //
 // #[tokio::test]
 // async fn test_client_chat_stream() {
 //     let client = Client::new(None, None, None, None).unwrap();
-//     let model = Model::OpenMistral7b;
+//
+//     let model = Model::mistral_small_latest();
 //     let messages = vec![ChatMessage::new_user_message(
 //         "Just guess the next word: \"Eiffel ...\"?",
 //     )];
@@ -19,22 +21,24 @@
 //         random_seed: Some(42),
 //         ..Default::default()
 //     };
 //
-//     let stream_result = client.chat_stream(model, messages, Some(options)).await;
-//     let mut stream = stream_result.expect("Failed to create stream.");
-//     while let Some(maybe_chunk_result) = stream.next().await {
-//         match maybe_chunk_result {
-//             Some(Ok(chunk)) => {
-//                 if chunk.choices[0].delta.role == Some(ChatMessageRole::Assistant)
-//                     || chunk.choices[0].finish_reason == Some("stop".to_string())
-//                 {
-//                     expect!(chunk.choices[0].delta.content.len()).to_be(0);
-//                 } else {
-//                     expect!(chunk.choices[0].delta.content.len()).to_be_greater_than(0);
-//                 }
-//             }
-//             Some(Err(error)) => eprintln!("Error processing chunk: {:?}", error),
-//             None => (),
-//         }
-//     }
+//     let stream = client
+//         .chat_stream(model, messages, Some(options))
+//         .await
+//         .expect("Failed to create stream.");
+//
+//     stream
+//         .for_each(|chunk_result| async {
+//             match chunk_result {
+//                 Ok(chunks) => {
+//                     for chunk in &chunks {
+//                         if let Some(content) = &chunk.choices[0].delta.content {
+//                             print!("{}", content);
+//                         }
+//                     }
+//                 }
+//                 Err(error) => eprintln!("Error: {:?}", error),
+//             }
+//         })
+//         .await;
 // }
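
Beyond swapping Model::OpenMistral7b for Model::mistral_small_latest(), the
rewritten test changes how the stream is consumed: the old version polled an
Option<Result<chunk>> in a while-let loop and asserted on each delta with
jrest, while the new one treats each stream item as a Result-wrapped batch of
chunks and simply prints each delta's content via for_each, matching the new
chunk-batch item type.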