-
Notifications
You must be signed in to change notification settings - Fork 34
/
completion.go
456 lines (404 loc) · 18.6 KB
/
completion.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
package openai
import (
"context"
"net/http"
"github.com/openai/openai-go/internal/apijson"
"github.com/openai/openai-go/internal/param"
"github.com/openai/openai-go/internal/requestconfig"
"github.com/openai/openai-go/option"
"github.com/openai/openai-go/packages/ssestream"
)
// CompletionService contains methods and other services that help with interacting
// with the openai API.
//
// Note, unlike clients, this service does not read variables from the environment
// automatically. You should not instantiate this service directly, and instead use
// the [NewCompletionService] method instead.
type CompletionService struct {
	// Options holds the request options applied to every request made through
	// this service. They take effect after the parent client's options and
	// before any per-call options.
	Options []option.RequestOption
}
// NewCompletionService generates a new service that applies the given options to
// each request. These options are applied after the parent client's options (if
// there is one), and before any request-specific options.
func NewCompletionService(opts ...option.RequestOption) (r *CompletionService) {
	return &CompletionService{Options: opts}
}
// New creates a completion for the provided prompt and parameters.
//
// The request is sent as a POST to the "completions" endpoint; the decoded
// response is returned in res.
func (r *CompletionService) New(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (res *Completion, err error) {
	// Copy r.Options before appending the per-call options: appending to
	// r.Options[:] directly can write the caller's options into spare capacity
	// of the service's own backing array, corrupting state shared across calls.
	opts = append(append([]option.RequestOption(nil), r.Options...), opts...)
	path := "completions"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...)
	return
}
// NewStreaming creates a completion for the provided prompt and parameters,
// returning the result as a server-sent-event stream.
//
// The "stream": true field is injected into the request body; it is prepended
// to the option list so a caller-supplied option can still override it.
func (r *CompletionService) NewStreaming(ctx context.Context, body CompletionNewParams, opts ...option.RequestOption) (stream *ssestream.Stream[Completion]) {
	var (
		raw *http.Response
		err error
	)
	// Copy r.Options before appending the per-call options: appending to
	// r.Options[:] directly can write the caller's options into spare capacity
	// of the service's own backing array, corrupting state shared across calls.
	opts = append(append([]option.RequestOption(nil), r.Options...), opts...)
	opts = append([]option.RequestOption{option.WithJSONSet("stream", true)}, opts...)
	path := "completions"
	err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &raw, opts...)
	// Any request error is carried into the stream and surfaced on first read.
	return ssestream.NewStream[Completion](ssestream.NewDecoder(raw), err)
}
// Represents a completion response from the API. Note: both the streamed and
// non-streamed response objects share the same shape (unlike the chat endpoint).
type Completion struct {
	// A unique identifier for the completion.
	ID string `json:"id,required"`
	// The list of completion choices the model generated for the input prompt.
	Choices []CompletionChoice `json:"choices,required"`
	// The Unix timestamp (in seconds) of when the completion was created.
	Created int64 `json:"created,required"`
	// The model used for completion.
	Model string `json:"model,required"`
	// The object type, which is always "text_completion"
	Object CompletionObject `json:"object,required"`
	// This fingerprint represents the backend configuration that the model runs with.
	//
	// Can be used in conjunction with the `seed` request parameter to understand when
	// backend changes have been made that might impact determinism.
	SystemFingerprint string `json:"system_fingerprint"`
	// Usage statistics for the completion request.
	Usage CompletionUsage `json:"usage"`
	// JSON holds per-field decode metadata and the raw response body; it is
	// excluded from serialization via the "-" tag.
	JSON completionJSON `json:"-"`
}
// completionJSON contains the JSON metadata for the struct [Completion]
type completionJSON struct {
	ID                apijson.Field
	Choices           apijson.Field
	Created           apijson.Field
	Model             apijson.Field
	Object            apijson.Field
	SystemFingerprint apijson.Field
	Usage             apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on [Completion].
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *Completion) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionJSON) RawJSON() string {
	return r.raw
}
// The object type, which is always "text_completion"
type CompletionObject string
const (
	CompletionObjectTextCompletion CompletionObject = "text_completion"
)
// IsKnown reports whether r is one of the enum's declared values.
func (r CompletionObject) IsKnown() bool {
	return r == CompletionObjectTextCompletion
}
// CompletionChoice is a single generated completion within a [Completion].
type CompletionChoice struct {
	// The reason the model stopped generating tokens. This will be `stop` if the model
	// hit a natural stop point or a provided stop sequence, `length` if the maximum
	// number of tokens specified in the request was reached, or `content_filter` if
	// content was omitted due to a flag from our content filters.
	FinishReason CompletionChoiceFinishReason `json:"finish_reason,required"`
	// Index is the position of this choice in the list of returned choices.
	Index int64 `json:"index,required"`
	// Logprobs carries token log-probability details; nullable in the API.
	Logprobs CompletionChoiceLogprobs `json:"logprobs,required,nullable"`
	// Text is the generated completion text.
	Text string `json:"text,required"`
	// JSON holds per-field decode metadata and the raw response body.
	JSON completionChoiceJSON `json:"-"`
}
// completionChoiceJSON contains the JSON metadata for the struct
// [CompletionChoice]
type completionChoiceJSON struct {
	FinishReason apijson.Field
	Index        apijson.Field
	Logprobs     apijson.Field
	Text         apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on [CompletionChoice].
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *CompletionChoice) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionChoiceJSON) RawJSON() string {
	return r.raw
}
// The reason the model stopped generating tokens. This will be `stop` if the model
// hit a natural stop point or a provided stop sequence, `length` if the maximum
// number of tokens specified in the request was reached, or `content_filter` if
// content was omitted due to a flag from our content filters.
type CompletionChoiceFinishReason string
const (
	CompletionChoiceFinishReasonStop          CompletionChoiceFinishReason = "stop"
	CompletionChoiceFinishReasonLength        CompletionChoiceFinishReason = "length"
	CompletionChoiceFinishReasonContentFilter CompletionChoiceFinishReason = "content_filter"
)
// IsKnown reports whether r is one of the enum's declared values.
func (r CompletionChoiceFinishReason) IsKnown() bool {
	return r == CompletionChoiceFinishReasonStop ||
		r == CompletionChoiceFinishReasonLength ||
		r == CompletionChoiceFinishReasonContentFilter
}
// CompletionChoiceLogprobs holds token-level log-probability data for a
// completion choice, following the OpenAI logprobs response format.
type CompletionChoiceLogprobs struct {
	// TextOffset gives, per token, its offset within the completion text
	// (per the API's logprobs format — units not verifiable from this file).
	TextOffset []int64 `json:"text_offset"`
	// TokenLogprobs is the log probability of each returned token.
	TokenLogprobs []float64 `json:"token_logprobs"`
	// Tokens is the list of generated tokens as strings.
	Tokens []string `json:"tokens"`
	// TopLogprobs maps, per position, the most likely tokens to their log
	// probabilities.
	TopLogprobs []map[string]float64 `json:"top_logprobs"`
	// JSON holds per-field decode metadata and the raw response body.
	JSON completionChoiceLogprobsJSON `json:"-"`
}
// completionChoiceLogprobsJSON contains the JSON metadata for the struct
// [CompletionChoiceLogprobs]
type completionChoiceLogprobsJSON struct {
	TextOffset    apijson.Field
	TokenLogprobs apijson.Field
	Tokens        apijson.Field
	TopLogprobs   apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on the struct.
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *CompletionChoiceLogprobs) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionChoiceLogprobsJSON) RawJSON() string {
	return r.raw
}
// Usage statistics for the completion request.
type CompletionUsage struct {
	// Number of tokens in the generated completion.
	CompletionTokens int64 `json:"completion_tokens,required"`
	// Number of tokens in the prompt.
	PromptTokens int64 `json:"prompt_tokens,required"`
	// Total number of tokens used in the request (prompt + completion).
	TotalTokens int64 `json:"total_tokens,required"`
	// Breakdown of tokens used in a completion.
	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
	// Breakdown of tokens used in the prompt.
	PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
	// JSON holds per-field decode metadata and the raw response body.
	JSON completionUsageJSON `json:"-"`
}
// completionUsageJSON contains the JSON metadata for the struct [CompletionUsage]
type completionUsageJSON struct {
	CompletionTokens        apijson.Field
	PromptTokens            apijson.Field
	TotalTokens             apijson.Field
	CompletionTokensDetails apijson.Field
	PromptTokensDetails     apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on [CompletionUsage].
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *CompletionUsage) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionUsageJSON) RawJSON() string {
	return r.raw
}
// Breakdown of tokens used in a completion.
type CompletionUsageCompletionTokensDetails struct {
	// When using Predicted Outputs, the number of tokens in the prediction that
	// appeared in the completion.
	AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
	// Audio input tokens generated by the model.
	AudioTokens int64 `json:"audio_tokens"`
	// Tokens generated by the model for reasoning.
	ReasoningTokens int64 `json:"reasoning_tokens"`
	// When using Predicted Outputs, the number of tokens in the prediction that did
	// not appear in the completion. However, like reasoning tokens, these tokens are
	// still counted in the total completion tokens for purposes of billing, output,
	// and context window limits.
	RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
	// JSON holds per-field decode metadata and the raw response body.
	JSON completionUsageCompletionTokensDetailsJSON `json:"-"`
}
// completionUsageCompletionTokensDetailsJSON contains the JSON metadata for the
// struct [CompletionUsageCompletionTokensDetails]
type completionUsageCompletionTokensDetailsJSON struct {
	AcceptedPredictionTokens apijson.Field
	AudioTokens              apijson.Field
	ReasoningTokens          apijson.Field
	RejectedPredictionTokens apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on the struct.
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *CompletionUsageCompletionTokensDetails) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionUsageCompletionTokensDetailsJSON) RawJSON() string {
	return r.raw
}
// Breakdown of tokens used in the prompt.
type CompletionUsagePromptTokensDetails struct {
	// Audio input tokens present in the prompt.
	AudioTokens int64 `json:"audio_tokens"`
	// Cached tokens present in the prompt.
	CachedTokens int64 `json:"cached_tokens"`
	// JSON holds per-field decode metadata and the raw response body.
	JSON completionUsagePromptTokensDetailsJSON `json:"-"`
}
// completionUsagePromptTokensDetailsJSON contains the JSON metadata for the struct
// [CompletionUsagePromptTokensDetails]
type completionUsagePromptTokensDetailsJSON struct {
	AudioTokens  apijson.Field
	CachedTokens apijson.Field
	// raw is the original JSON the struct was decoded from.
	raw string
	// ExtraFields collects response fields not declared on the struct.
	ExtraFields map[string]apijson.Field
}
// UnmarshalJSON implements json.Unmarshaler by delegating to
// apijson.UnmarshalRoot, which also populates the JSON metadata field.
func (r *CompletionUsagePromptTokensDetails) UnmarshalJSON(data []byte) (err error) {
	return apijson.UnmarshalRoot(data, r)
}
// RawJSON returns the raw JSON string the struct was decoded from.
func (r completionUsagePromptTokensDetailsJSON) RawJSON() string {
	return r.raw
}
// CompletionNewParams is the request body for [CompletionService.New] and
// [CompletionService.NewStreaming].
type CompletionNewParams struct {
	// ID of the model to use. You can use the
	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
	// see all of your available models, or see our
	// [Model overview](https://platform.openai.com/docs/models) for descriptions of
	// them.
	Model param.Field[CompletionNewParamsModel] `json:"model,required"`
	// The prompt(s) to generate completions for, encoded as a string, array of
	// strings, array of tokens, or array of token arrays.
	//
	// Note that <|endoftext|> is the document separator that the model sees during
	// training, so if a prompt is not specified the model will generate as if from the
	// beginning of a new document.
	Prompt param.Field[CompletionNewParamsPromptUnion] `json:"prompt,required"`
	// Generates `best_of` completions server-side and returns the "best" (the one with
	// the highest log probability per token). Results cannot be streamed.
	//
	// When used with `n`, `best_of` controls the number of candidate completions and
	// `n` specifies how many to return – `best_of` must be greater than `n`.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	BestOf param.Field[int64] `json:"best_of"`
	// Echo back the prompt in addition to the completion
	Echo param.Field[bool] `json:"echo"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
	// existing frequency in the text so far, decreasing the model's likelihood to
	// repeat the same line verbatim.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
	FrequencyPenalty param.Field[float64] `json:"frequency_penalty"`
	// Modify the likelihood of specified tokens appearing in the completion.
	//
	// Accepts a JSON object that maps tokens (specified by their token ID in the GPT
	// tokenizer) to an associated bias value from -100 to 100. You can use this
	// [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
	// Mathematically, the bias is added to the logits generated by the model prior to
	// sampling. The exact effect will vary per model, but values between -1 and 1
	// should decrease or increase likelihood of selection; values like -100 or 100
	// should result in a ban or exclusive selection of the relevant token.
	//
	// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
	// from being generated.
	LogitBias param.Field[map[string]int64] `json:"logit_bias"`
	// Include the log probabilities on the `logprobs` most likely output tokens, as
	// well the chosen tokens. For example, if `logprobs` is 5, the API will return a
	// list of the 5 most likely tokens. The API will always return the `logprob` of
	// the sampled token, so there may be up to `logprobs+1` elements in the response.
	//
	// The maximum value for `logprobs` is 5.
	Logprobs param.Field[int64] `json:"logprobs"`
	// The maximum number of [tokens](/tokenizer) that can be generated in the
	// completion.
	//
	// The token count of your prompt plus `max_tokens` cannot exceed the model's
	// context length.
	// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
	// for counting tokens.
	MaxTokens param.Field[int64] `json:"max_tokens"`
	// How many completions to generate for each prompt.
	//
	// **Note:** Because this parameter generates many completions, it can quickly
	// consume your token quota. Use carefully and ensure that you have reasonable
	// settings for `max_tokens` and `stop`.
	N param.Field[int64] `json:"n"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based on
	// whether they appear in the text so far, increasing the model's likelihood to
	// talk about new topics.
	//
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
	PresencePenalty param.Field[float64] `json:"presence_penalty"`
	// If specified, our system will make a best effort to sample deterministically,
	// such that repeated requests with the same `seed` and parameters should return
	// the same result.
	//
	// Determinism is not guaranteed, and you should refer to the `system_fingerprint`
	// response parameter to monitor changes in the backend.
	Seed param.Field[int64] `json:"seed"`
	// Up to 4 sequences where the API will stop generating further tokens. The
	// returned text will not contain the stop sequence.
	Stop param.Field[CompletionNewParamsStopUnion] `json:"stop"`
	// Options for streaming response. Only set this when you set `stream: true`.
	StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"`
	// The suffix that comes after a completion of inserted text.
	//
	// This parameter is only supported for `gpt-3.5-turbo-instruct`.
	Suffix param.Field[string] `json:"suffix"`
	// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
	// make the output more random, while lower values like 0.2 will make it more
	// focused and deterministic.
	//
	// We generally recommend altering this or `top_p` but not both.
	Temperature param.Field[float64] `json:"temperature"`
	// An alternative to sampling with temperature, called nucleus sampling, where the
	// model considers the results of the tokens with top_p probability mass. So 0.1
	// means only the tokens comprising the top 10% probability mass are considered.
	//
	// We generally recommend altering this or `temperature` but not both.
	TopP param.Field[float64] `json:"top_p"`
	// A unique identifier representing your end-user, which can help OpenAI to monitor
	// and detect abuse.
	// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
	User param.Field[string] `json:"user"`
}
// MarshalJSON implements json.Marshaler by delegating to apijson.MarshalRoot,
// which serializes only fields whose param.Field values were set.
func (r CompletionNewParams) MarshalJSON() (data []byte, err error) {
	return apijson.MarshalRoot(r)
}
// ID of the model to use. You can use the
// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
// see all of your available models, or see our
// [Model overview](https://platform.openai.com/docs/models) for descriptions of
// them.
type CompletionNewParamsModel string
const (
	CompletionNewParamsModelGPT3_5TurboInstruct CompletionNewParamsModel = "gpt-3.5-turbo-instruct"
	CompletionNewParamsModelDavinci002          CompletionNewParamsModel = "davinci-002"
	CompletionNewParamsModelBabbage002          CompletionNewParamsModel = "babbage-002"
)
// IsKnown reports whether r is one of the enum's declared values.
func (r CompletionNewParamsModel) IsKnown() bool {
	return r == CompletionNewParamsModelGPT3_5TurboInstruct ||
		r == CompletionNewParamsModelDavinci002 ||
		r == CompletionNewParamsModelBabbage002
}
// The prompt(s) to generate completions for, encoded as a string, array of
// strings, array of tokens, or array of token arrays.
//
// Note that <|endoftext|> is the document separator that the model sees during
// training, so if a prompt is not specified the model will generate as if from the
// beginning of a new document.
//
// Satisfied by [shared.UnionString], [CompletionNewParamsPromptArrayOfStrings],
// [CompletionNewParamsPromptArrayOfTokens],
// [CompletionNewParamsPromptArrayOfTokenArrays].
type CompletionNewParamsPromptUnion interface {
	ImplementsCompletionNewParamsPromptUnion()
}
// CompletionNewParamsPromptArrayOfStrings is a prompt given as multiple strings.
type CompletionNewParamsPromptArrayOfStrings []string
// ImplementsCompletionNewParamsPromptUnion marks the type as a member of
// [CompletionNewParamsPromptUnion].
func (r CompletionNewParamsPromptArrayOfStrings) ImplementsCompletionNewParamsPromptUnion() {}
// CompletionNewParamsPromptArrayOfTokens is a prompt given as token IDs.
type CompletionNewParamsPromptArrayOfTokens []int64
// ImplementsCompletionNewParamsPromptUnion marks the type as a member of
// [CompletionNewParamsPromptUnion].
func (r CompletionNewParamsPromptArrayOfTokens) ImplementsCompletionNewParamsPromptUnion() {}
// CompletionNewParamsPromptArrayOfTokenArrays is a prompt given as multiple
// token-ID sequences.
type CompletionNewParamsPromptArrayOfTokenArrays [][]int64
// ImplementsCompletionNewParamsPromptUnion marks the type as a member of
// [CompletionNewParamsPromptUnion].
func (r CompletionNewParamsPromptArrayOfTokenArrays) ImplementsCompletionNewParamsPromptUnion() {}
// Up to 4 sequences where the API will stop generating further tokens. The
// returned text will not contain the stop sequence.
//
// Satisfied by [shared.UnionString], [CompletionNewParamsStopArray].
type CompletionNewParamsStopUnion interface {
	ImplementsCompletionNewParamsStopUnion()
}
// CompletionNewParamsStopArray is a stop parameter given as multiple sequences.
type CompletionNewParamsStopArray []string
// ImplementsCompletionNewParamsStopUnion marks the type as a member of
// [CompletionNewParamsStopUnion].
func (r CompletionNewParamsStopArray) ImplementsCompletionNewParamsStopUnion() {}