feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#1112)

Learn more at http://openai.com/devday2024
Stainless Bot committed Oct 1, 2024
1 parent 52c0bb5 commit 6424924
Showing 4 changed files with 46 additions and 2 deletions.
1 change: 1 addition & 0 deletions src/resources/chat/chat.ts
@@ -16,6 +16,7 @@ export type ChatModel =
   | 'gpt-4o'
   | 'gpt-4o-2024-08-06'
   | 'gpt-4o-2024-05-13'
+  | 'gpt-4o-realtime-preview-2024-10-01'
   | 'chatgpt-4o-latest'
   | 'gpt-4o-mini'
   | 'gpt-4o-mini-2024-07-18'
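The practical effect of this hunk is that the new snapshot identifier type-checks wherever a `ChatModel` is expected. A minimal sketch; the deep import path shown is an assumption (the type is also re-exported from the package's resource index):

```ts
import type { ChatModel } from 'openai/resources/chat/chat';

// 'gpt-4o-realtime-preview-2024-10-01' is now a member of the ChatModel
// union, so this assignment no longer produces a type error.
const model: ChatModel = 'gpt-4o-realtime-preview-2024-10-01';
```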
20 changes: 18 additions & 2 deletions src/resources/chat/completions.ts
@@ -727,8 +727,12 @@ export type ChatCompletionCreateParams =
 
 export interface ChatCompletionCreateParamsBase {
   /**
-   * A list of messages comprising the conversation so far.
-   * [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
+   * A list of messages comprising the conversation so far. Depending on the
+   * [model](https://platform.openai.com/docs/models) you use, different message
+   * types (modalities) are supported, like
+   * [text](https://platform.openai.com/docs/guides/text-generation),
+   * [images](https://platform.openai.com/docs/guides/vision), and
+   * [audio](https://platform.openai.com/docs/guides/audio).
    */
   messages: Array<ChatCompletionMessageParam>;
 
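Since the updated doc comment notes that `messages` can mix modalities depending on the model, here is a hedged sketch of a text-plus-image request; the model name and image URL are placeholders and model support for image input varies:

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // One user message combining a text part and an image part.
  const completion = await client.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'What is shown in this image?' },
          { type: 'image_url', image_url: { url: 'https://example.com/photo.png' } },
        ],
      },
    ],
  });
  console.log(completion.choices[0].message.content);
}

main();
```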
@@ -806,6 +810,12 @@ export interface ChatCompletionCreateParamsBase {
    */
   max_tokens?: number | null;
 
+  /**
+   * Developer-defined tags and values used for filtering completions in the
+   * [dashboard](https://platform.openai.com/completions).
+   */
+  metadata?: Record<string, string> | null;
+
   /**
    * How many chat completion choices to generate for each input message. Note that
    * you will be charged based on the number of generated tokens across all of the
@@ -889,6 +899,12 @@ export interface ChatCompletionCreateParamsBase {
    */
   stop?: string | null | Array<string>;
 
+  /**
+   * Whether or not to store the output of this completion request for traffic
+   * logging in the [dashboard](https://platform.openai.com/completions).
+   */
+  store?: boolean | null;
+
   /**
    * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
    * sent as data-only
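Taken together, the two new request parameters can be passed straight through `chat.completions.create`. A minimal sketch; the metadata keys and prompt are placeholders:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Say hello.' }],
    // Opt this request into storage so it shows up in the dashboard
    // for evals and distillation.
    store: true,
    // Developer-defined tags used for filtering stored completions.
    metadata: { project: 'devday-demo', environment: 'staging' },
  });
  console.log(completion.id);
}

main();
```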
25 changes: 25 additions & 0 deletions src/resources/completions.ts
@@ -125,18 +125,43 @@ export interface CompletionUsage {
    * Breakdown of tokens used in a completion.
    */
   completion_tokens_details?: CompletionUsage.CompletionTokensDetails;
+
+  /**
+   * Breakdown of tokens used in the prompt.
+   */
+  prompt_tokens_details?: CompletionUsage.PromptTokensDetails;
 }
 
 export namespace CompletionUsage {
   /**
    * Breakdown of tokens used in a completion.
    */
   export interface CompletionTokensDetails {
+    /**
+     * Audio input tokens generated by the model.
+     */
+    audio_tokens?: number;
+
     /**
      * Tokens generated by the model for reasoning.
      */
     reasoning_tokens?: number;
   }
+
+  /**
+   * Breakdown of tokens used in the prompt.
+   */
+  export interface PromptTokensDetails {
+    /**
+     * Audio input tokens present in the prompt.
+     */
+    audio_tokens?: number;
+
+    /**
+     * Cached tokens present in the prompt.
+     */
+    cached_tokens?: number;
+  }
 }
 
 export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
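On the response side, the new usage breakdowns are optional fields on `CompletionUsage`, so callers should guard for their absence. A small sketch; whether each field is populated depends on the model and request:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Explain tokenization briefly.' }],
  });

  const usage = completion.usage;
  // Both detail objects and their members are optional.
  console.log('reasoning tokens:', usage?.completion_tokens_details?.reasoning_tokens);
  console.log('cached prompt tokens:', usage?.prompt_tokens_details?.cached_tokens);
}

main();
```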
2 changes: 2 additions & 0 deletions tests/api-resources/chat/completions.test.ts
@@ -34,13 +34,15 @@ describe('resource completions', () => {
       logprobs: true,
       max_completion_tokens: 0,
       max_tokens: 0,
+      metadata: { foo: 'string' },
       n: 1,
       parallel_tool_calls: true,
       presence_penalty: -2,
       response_format: { type: 'text' },
       seed: -9007199254740991,
       service_tier: 'auto',
       stop: 'string',
+      store: true,
       stream: false,
       stream_options: { include_usage: true },
       temperature: 1,
