feat(chat)!: change safe_prompt, temperature & top_p to non-Option types
BREAKING CHANGE:
- `Chat::ChatParams.safe_prompt` & `Chat::ChatRequest.safe_prompt` are now `bool` instead of `Option<bool>`. Default is `false`.
- `Chat::ChatParams.temperature` & `Chat::ChatRequest.temperature` are now `f32` instead of `Option<f32>`. Default is `0.7`.
- `Chat::ChatParams.top_p` & `Chat::ChatRequest.top_p` are now `f32` instead of `Option<f32>`. Default is `1.0`.
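For callers, the migration is mostly a matter of dropping the `Some(...)` wrappers. A minimal sketch, assuming the crate exposes `ChatParams` roughly as in the diff below (the `mistralai_client::v1::chat` module path is an assumption, not taken from this commit):

// Migration sketch (illustrative); the import path is an assumption.
use mistralai_client::v1::chat::ChatParams;

fn build_params() -> ChatParams {
    // Before this commit, callers wrapped every sampling knob:
    //   safe_prompt: Some(true), temperature: Some(0.2), top_p: Some(0.9)
    // After it, plain values are used, and omitted fields take the new
    // defaults (safe_prompt = false, temperature = 0.7, top_p = 1.0):
    ChatParams {
        safe_prompt: true,
        temperature: 0.2,
        top_p: 0.9,
        ..Default::default()
    }
}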
@@ -38,12 +38,14 @@ pub enum ChatMessageRole {
     User,
 }
 
 /// The format that the model must output.
+///
+/// See the [API documentation](https://docs.mistral.ai/api/#operation/createChatCompletion) for more information.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ResponseFormat {
     #[serde(rename = "type")]
     pub type_: String,
 }
 
 impl ResponseFormat {
     pub fn json_object() -> Self {
         Self {
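`ResponseFormat::json_object()` presumably fills `type_` with `"json_object"`; the constructor body is truncated in the hunk above, so that value is an assumption. A self-contained sketch of how the struct serializes under the `#[serde(rename = "type")]` attribute:

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct ResponseFormat {
    #[serde(rename = "type")]
    pub type_: String,
}

fn main() {
    // Assumed constructor result: type_ = "json_object", matching the
    // Mistral API's response_format value.
    let format = ResponseFormat { type_: "json_object".to_string() };
    // The rename attribute maps the Rust field `type_` to the JSON key "type":
    println!("{}", serde_json::to_string(&format).unwrap()); // {"type":"json_object"}
}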
@@ -55,28 +57,55 @@ impl ResponseFormat {
 // -----------------------------------------------------------------------------
 // Request
 
 /// The parameters for the chat request.
 ///
 /// See the [API documentation](https://docs.mistral.ai/api/#operation/createChatCompletion) for more information.
 #[derive(Debug)]
 pub struct ChatParams {
     /// The maximum number of tokens to generate in the completion.
     ///
     /// Defaults to `None`.
     pub max_tokens: Option<u32>,
     /// The seed to use for random sampling. If set, different calls will generate deterministic results.
     ///
     /// Defaults to `None`.
     pub random_seed: Option<u32>,
-    pub safe_prompt: Option<bool>,
-    pub temperature: Option<f32>,
-    pub tool_choice: Option<tool::ToolChoice>,
-    pub tools: Option<Vec<tool::Tool>>,
-    pub top_p: Option<f32>,
     /// The format that the model must output.
     ///
     /// Defaults to `None`.
     pub response_format: Option<ResponseFormat>,
+    /// Whether to inject a safety prompt before all conversations.
+    ///
+    /// Defaults to `false`.
+    pub safe_prompt: bool,
+    /// What sampling temperature to use, between `0.0` and `1.0`.
+    ///
+    /// Defaults to `0.7`.
+    pub temperature: f32,
+    /// Specifies if/how functions are called.
+    ///
+    /// Defaults to `None`.
+    pub tool_choice: Option<tool::ToolChoice>,
+    /// A list of available tools for the model.
+    ///
+    /// Defaults to `None`.
+    pub tools: Option<Vec<tool::Tool>>,
+    /// Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass.
+    ///
+    /// Defaults to `1.0`.
+    pub top_p: f32,
 }
 
 impl Default for ChatParams {
     fn default() -> Self {
         Self {
             max_tokens: None,
             random_seed: None,
-            safe_prompt: None,
-            temperature: None,
+            safe_prompt: false,
+            response_format: None,
+            temperature: 0.7,
             tool_choice: None,
             tools: None,
-            top_p: None,
-            response_format: None,
+            top_p: 1.0,
         }
     }
 }
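Because `Default` is now implemented manually, struct update syntax yields the documented defaults for any field left unset. A small illustrative test, relying only on the field shapes shown in this hunk:

#[test]
fn defaults_apply_to_unset_fields() {
    // Sketch: uses only the ChatParams fields visible in the diff above.
    let params = ChatParams {
        max_tokens: Some(512),
        temperature: 0.1,
        ..Default::default()
    };
    assert_eq!(params.top_p, 1.0);   // new default
    assert!(!params.safe_prompt);    // new default
    assert!(params.response_format.is_none());
}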
@@ -85,12 +114,12 @@ impl ChatParams {
         Self {
             max_tokens: None,
             random_seed: None,
-            safe_prompt: None,
-            temperature: None,
+            safe_prompt: false,
+            response_format: None,
+            temperature: 0.7,
             tool_choice: None,
             tools: None,
-            top_p: None,
-            response_format: Some(ResponseFormat::json_object()),
+            top_p: 1.0,
         }
     }
 }
@@ -105,20 +134,15 @@ pub struct ChatRequest {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub random_seed: Option<u32>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub safe_prompt: Option<bool>,
+    pub response_format: Option<ResponseFormat>,
+    pub safe_prompt: bool,
     pub stream: bool,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub temperature: Option<f32>,
+    pub temperature: f32,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tool_choice: Option<tool::ToolChoice>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tools: Option<Vec<tool::Tool>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub top_p: Option<f32>,
-    // TODO Check this prop (seen in official Python client but not in API doc).
-    // pub tool_choice: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub response_format: Option<ResponseFormat>,
+    pub top_p: f32,
 }
 
 impl ChatRequest {
     pub fn new(
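Dropping `skip_serializing_if` on the reworked fields changes the wire format: `safe_prompt`, `temperature`, and `top_p` are now always serialized, while the remaining `Option` fields still vanish when `None`. A self-contained mirror of the relevant fields demonstrating the effect (the `Req` struct is illustrative, not the crate's type):

use serde::Serialize;

// Minimal stand-in for the affected ChatRequest fields.
#[derive(Serialize)]
struct Req {
    #[serde(skip_serializing_if = "Option::is_none")]
    random_seed: Option<u32>,
    safe_prompt: bool,
    temperature: f32,
    top_p: f32,
}

fn main() {
    let req = Req { random_seed: None, safe_prompt: false, temperature: 0.7, top_p: 1.0 };
    // random_seed is omitted entirely; the three non-Option knobs are always sent:
    // {"safe_prompt":false,"temperature":0.7,"top_p":1.0}
    println!("{}", serde_json::to_string(&req).unwrap());
}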
@@ -115,12 +115,16 @@ pub enum ToolType {
     Function,
 }
 
+/// An enum representing how functions should be called.
 #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
 pub enum ToolChoice {
+    /// The model is forced to call a function.
     #[serde(rename = "any")]
     Any,
+    /// The model can choose to either generate a message or call a function.
     #[serde(rename = "auto")]
     Auto,
+    /// The model won't call a function and will generate a message instead.
     #[serde(rename = "none")]
     None,
 }
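The `#[serde(rename = ...)]` attributes map the variants to the lowercase strings the API expects. A self-contained round-trip sketch using the enum exactly as defined above:

use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub enum ToolChoice {
    #[serde(rename = "any")]
    Any,
    #[serde(rename = "auto")]
    Auto,
    #[serde(rename = "none")]
    None,
}

fn main() {
    // Unit variants serialize as plain JSON strings, using the renamed values.
    assert_eq!(serde_json::to_string(&ToolChoice::Auto).unwrap(), "\"auto\"");
    let parsed: ToolChoice = serde_json::from_str("\"any\"").unwrap();
    assert_eq!(parsed, ToolChoice::Any);
}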