diff --git a/.stats.yml b/.stats.yml
index 67778ee..ece2873 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ad878332083dd506a478a293db78dc9e7b1b2124f2682e1d991225bc5bbcc3b.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml
diff --git a/chat.go b/chat.go
index 33bcf54..ab1f5f1 100644
--- a/chat.go
+++ b/chat.go
@@ -30,33 +30,34 @@ func NewChatService(opts ...option.RequestOption) (r *ChatService) {
 type ChatModel = string
 
 const (
-	ChatModelO1Preview           ChatModel = "o1-preview"
-	ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
-	ChatModelO1Mini              ChatModel = "o1-mini"
-	ChatModelO1Mini2024_09_12    ChatModel = "o1-mini-2024-09-12"
-	ChatModelGPT4o               ChatModel = "gpt-4o"
-	ChatModelGPT4o2024_08_06     ChatModel = "gpt-4o-2024-08-06"
-	ChatModelGPT4o2024_05_13     ChatModel = "gpt-4o-2024-05-13"
-	ChatModelChatgpt4oLatest     ChatModel = "chatgpt-4o-latest"
-	ChatModelGPT4oMini           ChatModel = "gpt-4o-mini"
-	ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
-	ChatModelGPT4Turbo           ChatModel = "gpt-4-turbo"
-	ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
-	ChatModelGPT4_0125Preview    ChatModel = "gpt-4-0125-preview"
-	ChatModelGPT4TurboPreview    ChatModel = "gpt-4-turbo-preview"
-	ChatModelGPT4_1106Preview    ChatModel = "gpt-4-1106-preview"
-	ChatModelGPT4VisionPreview   ChatModel = "gpt-4-vision-preview"
-	ChatModelGPT4                ChatModel = "gpt-4"
-	ChatModelGPT4_0314           ChatModel = "gpt-4-0314"
-	ChatModelGPT4_0613           ChatModel = "gpt-4-0613"
-	ChatModelGPT4_32k            ChatModel = "gpt-4-32k"
-	ChatModelGPT4_32k0314        ChatModel = "gpt-4-32k-0314"
-	ChatModelGPT4_32k0613        ChatModel = "gpt-4-32k-0613"
-	ChatModelGPT3_5Turbo         ChatModel = "gpt-3.5-turbo"
-	ChatModelGPT3_5Turbo16k      ChatModel = "gpt-3.5-turbo-16k"
-	ChatModelGPT3_5Turbo0301     ChatModel = "gpt-3.5-turbo-0301"
-	ChatModelGPT3_5Turbo0613     ChatModel = "gpt-3.5-turbo-0613"
-	ChatModelGPT3_5Turbo1106     ChatModel = "gpt-3.5-turbo-1106"
-	ChatModelGPT3_5Turbo0125     ChatModel = "gpt-3.5-turbo-0125"
-	ChatModelGPT3_5Turbo16k0613  ChatModel = "gpt-3.5-turbo-16k-0613"
+	ChatModelO1Preview                      ChatModel = "o1-preview"
+	ChatModelO1Preview2024_09_12            ChatModel = "o1-preview-2024-09-12"
+	ChatModelO1Mini                         ChatModel = "o1-mini"
+	ChatModelO1Mini2024_09_12               ChatModel = "o1-mini-2024-09-12"
+	ChatModelGPT4o                          ChatModel = "gpt-4o"
+	ChatModelGPT4o2024_08_06                ChatModel = "gpt-4o-2024-08-06"
+	ChatModelGPT4o2024_05_13                ChatModel = "gpt-4o-2024-05-13"
+	ChatModelGPT4oRealtimePreview2024_10_01 ChatModel = "gpt-4o-realtime-preview-2024-10-01"
+	ChatModelChatgpt4oLatest                ChatModel = "chatgpt-4o-latest"
+	ChatModelGPT4oMini                      ChatModel = "gpt-4o-mini"
+	ChatModelGPT4oMini2024_07_18            ChatModel = "gpt-4o-mini-2024-07-18"
+	ChatModelGPT4Turbo                      ChatModel = "gpt-4-turbo"
+	ChatModelGPT4Turbo2024_04_09            ChatModel = "gpt-4-turbo-2024-04-09"
+	ChatModelGPT4_0125Preview               ChatModel = "gpt-4-0125-preview"
+	ChatModelGPT4TurboPreview               ChatModel = "gpt-4-turbo-preview"
+	ChatModelGPT4_1106Preview               ChatModel = "gpt-4-1106-preview"
+	ChatModelGPT4VisionPreview              ChatModel = "gpt-4-vision-preview"
+	ChatModelGPT4                           ChatModel = "gpt-4"
+	ChatModelGPT4_0314                      ChatModel = "gpt-4-0314"
+	ChatModelGPT4_0613                      ChatModel = "gpt-4-0613"
+	ChatModelGPT4_32k                       ChatModel = "gpt-4-32k"
+	ChatModelGPT4_32k0314                   ChatModel = "gpt-4-32k-0314"
+	ChatModelGPT4_32k0613                   ChatModel = "gpt-4-32k-0613"
+	ChatModelGPT3_5Turbo                    ChatModel = "gpt-3.5-turbo"
+	ChatModelGPT3_5Turbo16k                 ChatModel = "gpt-3.5-turbo-16k"
+	ChatModelGPT3_5Turbo0301                ChatModel = "gpt-3.5-turbo-0301"
+	ChatModelGPT3_5Turbo0613                ChatModel = "gpt-3.5-turbo-0613"
+	ChatModelGPT3_5Turbo1106                ChatModel = "gpt-3.5-turbo-1106"
+	ChatModelGPT3_5Turbo0125                ChatModel = "gpt-3.5-turbo-0125"
+	ChatModelGPT3_5Turbo16k0613             ChatModel = "gpt-3.5-turbo-16k-0613"
 )
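Aside from the spec checksum bump in `.stats.yml`, the only substantive change in `chat.go` is the new `ChatModelGPT4oRealtimePreview2024_10_01` constant; every other line in that hunk is gofmt realignment forced by the longer identifier. Because `ChatModel` is a plain string alias, the constant drops in anywhere a model ID string is expected. A minimal sketch (illustrative only; the realtime model targets the Realtime API, so other endpoints may reject it server-side):

```go
package main

import (
	"fmt"

	"github.com/openai/openai-go"
)

func main() {
	// ChatModel is declared as `type ChatModel = string`, so the new
	// constant is interchangeable with any model ID string.
	var model openai.ChatModel = openai.ChatModelGPT4oRealtimePreview2024_10_01
	fmt.Println(model) // prints: gpt-4o-realtime-preview-2024-10-01
}
```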
diff --git a/chatcompletion.go b/chatcompletion.go
index 3a368c1..8166a88 100644
--- a/chatcompletion.go
+++ b/chatcompletion.go
@@ -1442,8 +1442,12 @@ func (r ChatCompletionUserMessageParamRole) IsKnown() bool {
 }
 
 type ChatCompletionNewParams struct {
-	// A list of messages comprising the conversation so far.
-	// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
+	// A list of messages comprising the conversation so far. Depending on the
+	// [model](https://platform.openai.com/docs/models) you use, different message
+	// types (modalities) are supported, like
+	// [text](https://platform.openai.com/docs/guides/text-generation),
+	// [images](https://platform.openai.com/docs/guides/vision), and
+	// [audio](https://platform.openai.com/docs/guides/audio).
 	Messages param.Field[[]ChatCompletionMessageParamUnion] `json:"messages,required"`
 	// ID of the model to use. See the
 	// [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
@@ -1495,6 +1499,9 @@ type ChatCompletionNewParams struct {
 	// compatible with
 	// [o1 series models](https://platform.openai.com/docs/guides/reasoning).
 	MaxTokens param.Field[int64] `json:"max_tokens"`
+	// Developer-defined tags and values used for filtering completions in the
+	// [dashboard](https://platform.openai.com/completions).
+	Metadata param.Field[map[string]string] `json:"metadata"`
 	// How many chat completion choices to generate for each input message. Note that
 	// you will be charged based on the number of generated tokens across all of the
 	// choices. Keep `n` as `1` to minimize costs.
@@ -1554,6 +1561,9 @@ type ChatCompletionNewParams struct {
 	ServiceTier param.Field[ChatCompletionNewParamsServiceTier] `json:"service_tier"`
 	// Up to 4 sequences where the API will stop generating further tokens.
 	Stop param.Field[ChatCompletionNewParamsStopUnion] `json:"stop"`
+	// Whether or not to store the output of this completion request for traffic
+	// logging in the [dashboard](https://platform.openai.com/completions).
+	Store param.Field[bool] `json:"store"`
 	// Options for streaming response. Only set this when you set `stream: true`.
 	StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"`
 	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
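The two new request fields, `Metadata` and `Store`, slot in like any other optional param via `openai.F`. A minimal sketch of a request that opts into storage and tags the completion (model choice and tag values are illustrative; the updated test below exercises the same fields against a mock server):

```go
package main

import (
	"context"
	"fmt"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
	completion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Model: openai.F(openai.ChatModelGPT4o),
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Say hello."),
		}),
		// New in this change: persist the completion for traffic logging
		// and attach tags for filtering in the dashboard.
		Store: openai.F(true),
		Metadata: openai.F(map[string]string{
			"purpose": "example", // illustrative tag
		}),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(completion.Choices[0].Message.Content)
}
```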
diff --git a/chatcompletion_test.go b/chatcompletion_test.go
index 7f5250c..e0ebbd8 100644
--- a/chatcompletion_test.go
+++ b/chatcompletion_test.go
@@ -48,15 +48,19 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
 		Logprobs:            openai.F(true),
 		MaxCompletionTokens: openai.F(int64(0)),
 		MaxTokens:           openai.F(int64(0)),
-		N:                   openai.F(int64(1)),
-		ParallelToolCalls:   openai.F(true),
-		PresencePenalty:     openai.F(-2.000000),
+		Metadata: openai.F(map[string]string{
+			"foo": "string",
+		}),
+		N:                 openai.F(int64(1)),
+		ParallelToolCalls: openai.F(true),
+		PresencePenalty:   openai.F(-2.000000),
 		ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{
 			Type: openai.F(shared.ResponseFormatTextTypeText),
 		}),
 		Seed:        openai.F(int64(-9007199254740991)),
 		ServiceTier: openai.F(openai.ChatCompletionNewParamsServiceTierAuto),
 		Stop:        openai.F[openai.ChatCompletionNewParamsStopUnion](shared.UnionString("string")),
+		Store:       openai.F(true),
 		StreamOptions: openai.F(openai.ChatCompletionStreamOptionsParam{
 			IncludeUsage: openai.F(true),
 		}),
diff --git a/completion.go b/completion.go
index a55022d..fb43634 100644
--- a/completion.go
+++ b/completion.go
@@ -200,7 +200,9 @@ type CompletionUsage struct {
 	TotalTokens int64 `json:"total_tokens,required"`
 	// Breakdown of tokens used in a completion.
 	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
-	JSON                    completionUsageJSON                    `json:"-"`
+	// Breakdown of tokens used in the prompt.
+	PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
+	JSON                completionUsageJSON                `json:"-"`
 }
 
 // completionUsageJSON contains the JSON metadata for the struct [CompletionUsage]
@@ -209,6 +211,7 @@ type completionUsageJSON struct {
 	PromptTokens            apijson.Field
 	TotalTokens             apijson.Field
 	CompletionTokensDetails apijson.Field
+	PromptTokensDetails     apijson.Field
 	raw                     string
 	ExtraFields             map[string]apijson.Field
 }
@@ -223,6 +226,8 @@ func (r completionUsageJSON) RawJSON() string {
 
 // Breakdown of tokens used in a completion.
 type CompletionUsageCompletionTokensDetails struct {
+	// Audio input tokens generated by the model.
+	AudioTokens int64 `json:"audio_tokens"`
 	// Tokens generated by the model for reasoning.
 	ReasoningTokens int64                                      `json:"reasoning_tokens"`
 	JSON            completionUsageCompletionTokensDetailsJSON `json:"-"`
@@ -231,6 +236,7 @@ type CompletionUsageCompletionTokensDetails struct {
 // completionUsageCompletionTokensDetailsJSON contains the JSON metadata for the
 // struct [CompletionUsageCompletionTokensDetails]
 type completionUsageCompletionTokensDetailsJSON struct {
+	AudioTokens     apijson.Field
 	ReasoningTokens apijson.Field
 	raw             string
 	ExtraFields     map[string]apijson.Field
@@ -244,6 +250,32 @@ func (r completionUsageCompletionTokensDetailsJSON) RawJSON() string {
 	return r.raw
 }
 
+// Breakdown of tokens used in the prompt.
+type CompletionUsagePromptTokensDetails struct {
+	// Audio input tokens present in the prompt.
+	AudioTokens int64 `json:"audio_tokens"`
+	// Cached tokens present in the prompt.
+	CachedTokens int64                                  `json:"cached_tokens"`
+	JSON         completionUsagePromptTokensDetailsJSON `json:"-"`
+}
+
+// completionUsagePromptTokensDetailsJSON contains the JSON metadata for the
+// struct [CompletionUsagePromptTokensDetails]
+type completionUsagePromptTokensDetailsJSON struct {
+	AudioTokens  apijson.Field
+	CachedTokens apijson.Field
+	raw          string
+	ExtraFields  map[string]apijson.Field
+}
+
+func (r *CompletionUsagePromptTokensDetails) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r completionUsagePromptTokensDetailsJSON) RawJSON() string {
+	return r.raw
+}
+
 type CompletionNewParams struct {
 	// ID of the model to use. You can use the
 	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
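With `CompletionUsagePromptTokensDetails` wired into `CompletionUsage`, the new breakdowns are plain fields on the response's usage object (the same `CompletionUsage` struct is reused by the chat completions response). A minimal sketch of reading them; fields the server omits simply decode to their zero value:

```go
package main

import (
	"context"
	"fmt"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
	completion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Model: openai.F(openai.ChatModelGPT4o),
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Summarize Hamlet in one sentence."),
		}),
	})
	if err != nil {
		panic(err)
	}
	usage := completion.Usage
	fmt.Printf("prompt=%d completion=%d total=%d\n",
		usage.PromptTokens, usage.CompletionTokens, usage.TotalTokens)
	// New in this change: per-modality and cache breakdowns.
	fmt.Println("cached prompt tokens:", usage.PromptTokensDetails.CachedTokens)
	fmt.Println("audio prompt tokens: ", usage.PromptTokensDetails.AudioTokens)
	fmt.Println("audio output tokens: ", usage.CompletionTokensDetails.AudioTokens)
}
```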