feat(api): add o1 models #49

Merged 1 commit on Sep 12, 2024
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml
6 changes: 3 additions & 3 deletions README.md
@@ -54,7 +54,7 @@ func main() {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err != nil {
panic(err.Error())
@@ -240,7 +240,7 @@ client.Chat.Completions.New(
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
},
// This sets the per-retry timeout
option.WithRequestTimeout(20*time.Second),
@@ -303,7 +303,7 @@ client.Chat.Completions.New(
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
},
option.WithMaxRetries(5),
)
28 changes: 16 additions & 12 deletions betaassistant.go
@@ -1860,7 +1860,8 @@ type FileSearchToolFileSearch struct {
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
// for more information.
MaxNumResults int64 `json:"max_num_results"`
-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
//
// See the
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
@@ -1886,26 +1887,27 @@ func (r fileSearchToolFileSearchJSON) RawJSON() string {
return r.raw
}

-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
//
// See the
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
// for more information.
type FileSearchToolFileSearchRankingOptions struct {
-// The score threshold for the file search. All values must be a floating point
-// number between 0 and 1.
-ScoreThreshold float64 `json:"score_threshold,required"`
// The ranker to use for the file search. If not specified, will use the `auto`
// ranker.
Ranker FileSearchToolFileSearchRankingOptionsRanker `json:"ranker"`
+// The score threshold for the file search. All values must be a floating point
+// number between 0 and 1.
+ScoreThreshold float64 `json:"score_threshold"`
-JSON fileSearchToolFileSearchRankingOptionsJSON `json:"-"`
+JSON fileSearchToolFileSearchRankingOptionsJSON `json:"-"`
}

// fileSearchToolFileSearchRankingOptionsJSON contains the JSON metadata for the
// struct [FileSearchToolFileSearchRankingOptions]
type fileSearchToolFileSearchRankingOptionsJSON struct {
+Ranker apijson.Field
ScoreThreshold apijson.Field
-Ranker apijson.Field
raw string
ExtraFields map[string]apijson.Field
}
@@ -1961,7 +1963,8 @@ type FileSearchToolFileSearchParam struct {
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
// for more information.
MaxNumResults param.Field[int64] `json:"max_num_results"`
-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
//
// See the
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
@@ -1973,18 +1976,19 @@ func (r FileSearchToolFileSearchParam) MarshalJSON() (data []byte, err error) {
return apijson.MarshalRoot(r)
}

-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
//
// See the
// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
// for more information.
type FileSearchToolFileSearchRankingOptionsParam struct {
-// The score threshold for the file search. All values must be a floating point
-// number between 0 and 1.
-ScoreThreshold param.Field[float64] `json:"score_threshold,required"`
// The ranker to use for the file search. If not specified, will use the `auto`
// ranker.
Ranker param.Field[FileSearchToolFileSearchRankingOptionsRanker] `json:"ranker"`
+// The score threshold for the file search. All values must be a floating point
+// number between 0 and 1.
+ScoreThreshold param.Field[float64] `json:"score_threshold"`
}

func (r FileSearchToolFileSearchRankingOptionsParam) MarshalJSON() (data []byte, err error) {
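The net change in this file: Ranker now leads the struct and score_threshold is no longer required. A minimal usage sketch under those assumptions; the RankerAuto constant name is an inference from the FileSearchToolFileSearchRankingOptionsRanker type above, not something this diff shows:

package main

import "github.com/openai/openai-go"

func main() {
	// Sketch only: RankerAuto is an assumed constant name for the
	// FileSearchToolFileSearchRankingOptionsRanker enum in this diff.
	opts := openai.FileSearchToolFileSearchRankingOptionsParam{
		Ranker: openai.F(openai.FileSearchToolFileSearchRankingOptionsRankerAuto),
		// ScoreThreshold is optional now that `required` is gone from its tag.
		ScoreThreshold: openai.F(0.5),
	}
	_ = opts
}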
6 changes: 5 additions & 1 deletion chat.go
@@ -30,9 +30,13 @@ func NewChatService(opts ...option.RequestOption) (r *ChatService) {
type ChatModel = string

const (
+ChatModelO1Preview ChatModel = "o1-preview"
+ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
+ChatModelO1Mini ChatModel = "o1-mini"
+ChatModelO1Mini2024_09_12 ChatModel = "o1-mini-2024-09-12"
ChatModelGPT4o ChatModel = "gpt-4o"
-ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06"
+ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
ChatModelChatgpt4oLatest ChatModel = "chatgpt-4o-latest"
ChatModelGPT4oMini ChatModel = "gpt-4o-mini"
ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
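For reference, calling one of the newly added models follows the same shape as the README example in this PR; a runnable sketch mirroring that snippet, with o1-mini swapped in:

package main

import (
	"context"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient() // reads OPENAI_API_KEY from the environment
	completion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
		}}),
		// Any of the four new constants works here.
		Model: openai.F(openai.ChatModelO1Mini),
	})
	if err != nil {
		panic(err.Error())
	}
	println(completion.Choices[0].Message.Content)
}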
46 changes: 28 additions & 18 deletions chatcompletion.go
@@ -1385,13 +1385,17 @@ type ChatCompletionNewParams struct {
// returns the log probabilities of each output token returned in the `content` of
// `message`.
Logprobs param.Field[bool] `json:"logprobs"`
+// An upper bound for the number of tokens that can be generated for a completion,
+// including visible output tokens and
+// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"`
// The maximum number of [tokens](/tokenizer) that can be generated in the chat
-// completion.
+// completion. This value can be used to control
+// [costs](https://openai.com/api/pricing/) for text generated via API.
//
-// The total length of input tokens and generated tokens is limited by the model's
-// context length.
-// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-// for counting tokens.
+// This value is now deprecated in favor of `max_completion_tokens`, and is not
+// compatible with
+// [o1 series models](https://platform.openai.com/docs/guides/reasoning).
MaxTokens param.Field[int64] `json:"max_tokens"`
// How many chat completion choices to generate for each input message. Note that
// you will be charged based on the number of generated tokens across all of the
@@ -1414,11 +1418,11 @@
// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
//
// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-// Outputs which guarantees the model will match your supplied JSON schema. Learn
-// more in the
+// Outputs which ensures the model will match your supplied JSON schema. Learn more
+// in the
// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
// message the model generates is valid JSON.
//
// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1438,8 +1442,11 @@
// Specifies the latency tier to use for processing the request. This parameter is
// relevant for customers subscribed to the scale tier service:
//
-// - If set to 'auto', the system will utilize scale tier credits until they are
-// exhausted.
+// - If set to 'auto', and the Project is Scale tier enabled, the system will
+// utilize scale tier credits until they are exhausted.
+// - If set to 'auto', and the Project is not Scale tier enabled, the request will
+// be processed using the default service tier with a lower uptime SLA and no
+// latency guarantee.
// - If set to 'default', the request will be processed using the default service
// tier with a lower uptime SLA and no latency guarantee.
// - When not set, the default behavior is 'auto'.
@@ -1557,11 +1564,11 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error)
// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
//
// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-// Outputs which guarantees the model will match your supplied JSON schema. Learn
-// more in the
+// Outputs which ensures the model will match your supplied JSON schema. Learn more
+// in the
// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
// message the model generates is valid JSON.
//
// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1591,11 +1598,11 @@ func (r ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParams
// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
//
// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-// Outputs which guarantees the model will match your supplied JSON schema. Learn
-// more in the
+// Outputs which ensures the model will match your supplied JSON schema. Learn more
+// in the
// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
//
-// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
// message the model generates is valid JSON.
//
// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1633,8 +1640,11 @@ func (r ChatCompletionNewParamsResponseFormatType) IsKnown() bool {
// Specifies the latency tier to use for processing the request. This parameter is
// relevant for customers subscribed to the scale tier service:
//
-// - If set to 'auto', the system will utilize scale tier credits until they are
-// exhausted.
+// - If set to 'auto', and the Project is Scale tier enabled, the system will
+// utilize scale tier credits until they are exhausted.
+// - If set to 'auto', and the Project is not Scale tier enabled, the request will
+// be processed using the default service tier with a lower uptime SLA and no
+// latency guarantee.
// - If set to 'default', the request will be processed using the default service
// tier with a lower uptime SLA and no latency guarantee.
// - When not set, the default behavior is 'auto'.
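Taken together, these doc updates steer o1-series callers away from the deprecated MaxTokens toward the new MaxCompletionTokens, which also budgets the unseen reasoning tokens. A hedged sketch of the intended call shape, with an illustrative token budget:

package main

import "github.com/openai/openai-go"

// buildO1Params is a sketch: it caps total generated tokens (visible plus
// reasoning) via the new MaxCompletionTokens field. MaxTokens is documented
// above as deprecated and incompatible with o1-series models.
func buildO1Params() openai.ChatCompletionNewParams {
	return openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{openai.ChatCompletionUserMessageParam{
			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
		}}),
		Model:               openai.F(openai.ChatModelO1Preview),
		MaxCompletionTokens: openai.F(int64(1024)), // illustrative budget
	}
}

func main() { _ = buildO1Params() }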
13 changes: 7 additions & 6 deletions chatcompletion_test.go
@@ -32,7 +32,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
Role: openai.F(openai.ChatCompletionSystemMessageParamRoleSystem),
Name: openai.F("name"),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
FrequencyPenalty: openai.F(-2.000000),
FunctionCall: openai.F[openai.ChatCompletionNewParamsFunctionCallUnion](openai.ChatCompletionNewParamsFunctionCallString(openai.ChatCompletionNewParamsFunctionCallStringNone)),
Functions: openai.F([]openai.ChatCompletionNewParamsFunction{{
@@ -45,11 +45,12 @@
LogitBias: openai.F(map[string]int64{
"foo": int64(0),
}),
-Logprobs: openai.F(true),
-MaxTokens: openai.F(int64(0)),
-N: openai.F(int64(1)),
-ParallelToolCalls: openai.F(true),
-PresencePenalty: openai.F(-2.000000),
+Logprobs: openai.F(true),
+MaxCompletionTokens: openai.F(int64(0)),
+MaxTokens: openai.F(int64(0)),
+N: openai.F(int64(1)),
+ParallelToolCalls: openai.F(true),
+PresencePenalty: openai.F(-2.000000),
ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{
Type: openai.F(shared.ResponseFormatTextTypeText),
}),
12 changes: 6 additions & 6 deletions client_test.go
@@ -41,7 +41,7 @@ func TestUserAgentHeader(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if userAgent != fmt.Sprintf("OpenAI/Go %s", internal.PackageVersion) {
t.Errorf("Expected User-Agent to be correct, but got: %#v", userAgent)
@@ -70,7 +70,7 @@ func TestRetryAfter(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err == nil || res != nil {
t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -102,7 +102,7 @@ func TestRetryAfterMs(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err == nil || res != nil {
t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -130,7 +130,7 @@ func TestContextCancel(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err == nil || res != nil {
t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -155,7 +155,7 @@ func TestContextCancelDelay(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err == nil || res != nil {
t.Error("expected there to be a cancel error and for the response to be nil")
@@ -186,7 +186,7 @@ func TestContextDeadline(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err == nil || res != nil {
t.Error("expected there to be a deadline error and for the response to be nil")
40 changes: 33 additions & 7 deletions completion.go
@@ -197,17 +197,20 @@ type CompletionUsage struct {
// Number of tokens in the prompt.
PromptTokens int64 `json:"prompt_tokens,required"`
// Total number of tokens used in the request (prompt + completion).
-TotalTokens int64 `json:"total_tokens,required"`
-JSON completionUsageJSON `json:"-"`
+TotalTokens int64 `json:"total_tokens,required"`
+// Breakdown of tokens used in a completion.
+CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
+JSON completionUsageJSON `json:"-"`
}

// completionUsageJSON contains the JSON metadata for the struct [CompletionUsage]
type completionUsageJSON struct {
-CompletionTokens apijson.Field
-PromptTokens apijson.Field
-TotalTokens apijson.Field
-raw string
-ExtraFields map[string]apijson.Field
+CompletionTokens apijson.Field
+PromptTokens apijson.Field
+TotalTokens apijson.Field
+CompletionTokensDetails apijson.Field
+raw string
+ExtraFields map[string]apijson.Field
}

func (r *CompletionUsage) UnmarshalJSON(data []byte) (err error) {
@@ -218,6 +221,29 @@ func (r completionUsageJSON) RawJSON() string {
return r.raw
}

+// Breakdown of tokens used in a completion.
+type CompletionUsageCompletionTokensDetails struct {
+	// Tokens generated by the model for reasoning.
+	ReasoningTokens int64 `json:"reasoning_tokens"`
+	JSON completionUsageCompletionTokensDetailsJSON `json:"-"`
+}
+
+// completionUsageCompletionTokensDetailsJSON contains the JSON metadata for the
+// struct [CompletionUsageCompletionTokensDetails]
+type completionUsageCompletionTokensDetailsJSON struct {
+	ReasoningTokens apijson.Field
+	raw string
+	ExtraFields map[string]apijson.Field
+}
+
+func (r *CompletionUsageCompletionTokensDetails) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r completionUsageCompletionTokensDetailsJSON) RawJSON() string {
+	return r.raw
+}
+
type CompletionNewParams struct {
// ID of the model to use. You can use the
// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
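The new CompletionUsageCompletionTokensDetails type surfaces reasoning tokens in usage payloads. A small sketch of reading it from the CompletionUsage struct defined above; since the field is optional in responses, the breakdown is simply zero for models that do not report one:

package main

import (
	"fmt"

	"github.com/openai/openai-go"
)

// printUsage is a sketch: it prints the token breakdown from a completion's
// usage block, using only fields defined in this diff.
func printUsage(u openai.CompletionUsage) {
	fmt.Printf("prompt=%d completion=%d (reasoning=%d) total=%d\n",
		u.PromptTokens,
		u.CompletionTokens,
		u.CompletionTokensDetails.ReasoningTokens,
		u.TotalTokens)
}

func main() {
	printUsage(openai.CompletionUsage{}) // zero value stands in for a real response
}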
2 changes: 1 addition & 1 deletion finetuningjob.go
@@ -524,7 +524,7 @@ type FineTuningJobNewParams struct {
// job parameters should produce the same results, but may differ in rare cases. If
// a seed is not specified, one will be generated for you.
Seed param.Field[int64] `json:"seed"`
-// A string of up to 18 characters that will be added to your fine-tuned model
+// A string of up to 64 characters that will be added to your fine-tuned model
// name.
//
// For example, a `suffix` of "custom-model-name" would produce a model name like
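Only the documented suffix limit changes here, from 18 to 64 characters. A hedged sketch of a fine-tuning request that uses it; the model constant name and file ID are placeholders, not taken from this PR:

package main

import (
	"context"

	"github.com/openai/openai-go"
)

func main() {
	client := openai.NewClient()
	// Sketch only: "file-abc123" is a placeholder training file ID, and the
	// model constant name is an assumption about this SDK's fine-tuning enum.
	_, err := client.FineTuning.Jobs.New(context.TODO(), openai.FineTuningJobNewParams{
		Model:        openai.F(openai.FineTuningJobNewParamsModelGPT3_5Turbo),
		TrainingFile: openai.F("file-abc123"),
		// May now be up to 64 characters.
		Suffix: openai.F("custom-model-name"),
	})
	if err != nil {
		panic(err.Error())
	}
}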
2 changes: 1 addition & 1 deletion usage_test.go
@@ -29,7 +29,7 @@ func TestUsage(t *testing.T) {
Role: openai.F(openai.ChatCompletionUserMessageParamRoleUser),
Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
}}),
-Model: openai.F(openai.ChatModelGPT4o),
+Model: openai.F(openai.ChatModelO1Preview),
})
if err != nil {
t.Error(err)