diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.cadl b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.cadl
index 8093d171b5b4..fd602657dff8 100644
--- a/specification/cognitiveservices/OpenAI.Inference/models/completions.create.cadl
+++ b/specification/cognitiveservices/OpenAI.Inference/models/completions.create.cadl
@@ -162,10 +162,13 @@ model CompletionsLogProbs {
   text_offset?: int32[];
 }
 
-@doc("Measurment of the amount of tokens used in this request and response")
+@doc("""
+Representation of the token counts processed for a completions request.
+Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and other consumers.
+""")
 model CompletionsUsage {
   @doc("Number of tokens received in the completion")
-  completion_token: int32,
+  completion_tokens: int32,
   @doc("Number of tokens sent in the original request")
   prompt_tokens: int32,
   @doc("Total number of tokens transacted in this request/response")
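
For reference, the renamed field lines up with the `usage` object the completions endpoint returns on the wire. Below is a minimal TypeScript sketch of that shape, assuming the conventional OpenAI field names; the interface name and sample values are illustrative, and the total-count field's declaration falls outside this hunk, so `total_tokens` is an assumption here.

// Sketch of the JSON "usage" payload described by CompletionsUsage.
// Names mirror the model fields; sample values are illustrative only.
interface CompletionsUsage {
  completion_tokens: number; // tokens received in the completion
  prompt_tokens: number;     // tokens sent in the original request
  total_tokens: number;      // total tokens transacted (assumed name; declared outside this hunk)
}

const usage: CompletionsUsage = {
  completion_tokens: 42,
  prompt_tokens: 10,
  total_tokens: 52,
};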