584: AssistantModifyRequest does not match the OpenAI docs #585

Merged
43 changes: 39 additions & 4 deletions OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
@@ -3,7 +3,7 @@

namespace OpenAI.ObjectModels.RequestModels;

-public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileIds, IOpenAiModels.IMetaData
+public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IMetaData, IOpenAiModels.ITemperature
{
/// <summary>
/// The name of the assistant. The maximum length is 256
@@ -30,10 +30,12 @@ public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileI
public List<ToolDefinition>? Tools { get; set; }

/// <summary>
-/// A list of File IDs attached to this assistant.
+/// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For
+/// example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of
+/// vector store IDs.
/// </summary>
-[JsonPropertyName("file_ids")]
-public List<string>? FileIds { get; set; }
+[JsonPropertyName("tool_resources")]
+public ToolResources? ToolResources { get; set; }

/// <summary>
/// Set of 16 key-value pairs that can be attached to an object.
@@ -46,4 +48,37 @@ public class AssistantModifyRequest : IOpenAiModels.IFileI
/// </summary>
[JsonPropertyName("model")]
public string Model { get; set; }

/// <summary>
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic.
/// </summary>
[JsonPropertyName("temperature")]
public float? Temperature { get; set; }

/// <summary>
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are
/// considered.
/// We generally recommend altering this or temperature but not both.
/// </summary>
[JsonPropertyName("top_p")]
public double? TopP { get; set; }

/// <summary>
/// Specifies the format that the model must output. Compatible with
/// <a href="https://platform.openai.com/docs/models/gpt-4o">GPT-4o</a>,
/// <a href="https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4">GPT-4 Turbo</a>, and all GPT-3.5 Turbo
/// models since gpt-3.5-turbo-1106.
/// Setting to <c>{ "type": "json_object" }</c> enables JSON mode, which guarantees the message the model generates is
/// valid JSON. <br />
/// <b>Important: </b>when using JSON mode, you must also instruct the model to produce JSON yourself via a system or
/// user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the
/// token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be
/// partially cut off if <c>finish_reason="length"</c>, which indicates the generation exceeded <c>max_tokens</c> or the
/// conversation exceeded the max context length.
/// </summary>
[JsonPropertyName("response_format")]
public ResponseFormatOneOfType? ResponseFormat { get; set; }
}
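
For orientation, here is a minimal sketch of how the reworked request might be populated after this change. It is not part of the PR and only sets properties visible in the diff above; the ToolResources member names and the ResponseFormat value hinted at in the comments follow the OpenAI API reference and are assumptions about the SDK's object model, not something this diff shows.

```csharp
// A minimal usage sketch, not part of this PR: it only sets properties that
// appear in the diff above. The commented-out ToolResources members
// (CodeInterpreter.FileIds, FileSearch.VectorStoreIds) and the ResponseFormat
// value are assumptions based on the OpenAI API docs, not on this SDK's code.
using OpenAI.ObjectModels.RequestModels;

var modifyRequest = new AssistantModifyRequest
{
    Model = "gpt-4o",

    // Sampling controls added by this PR; the docs recommend altering
    // Temperature or TopP, but not both.
    Temperature = 0.2f,
    // TopP = 0.1,

    // tool_resources replaces the removed file_ids property.
    ToolResources = new ToolResources
    {
        // Assumed shape, mirroring the OpenAI docs:
        // CodeInterpreter = new() { FileIds = new List<string> { "file_abc123" } },
        // FileSearch = new() { VectorStoreIds = new List<string> { "vs_abc123" } }
    },

    // ResponseFormat = ...  // e.g. the JSON-mode equivalent of { "type": "json_object" };
                             // the exact ResponseFormatOneOfType shape is not shown in this diff.
};
```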