diff --git a/.chloggen/first-gen-ai.yaml b/.chloggen/first-gen-ai.yaml new file mode 100755 index 0000000000..c6db17dab9 --- /dev/null +++ b/.chloggen/first-gen-ai.yaml @@ -0,0 +1,22 @@ +# Use this changelog template to create an entry for release notes. +# +# If your change doesn't affect end users you should instead start +# your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the area of concern in the attributes-registry, (e.g. http, cloud, db) +component: gen_ai + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Introducing semantic conventions for LLM applications. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +# The values here must be integers. +issues: [327] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 9880ce7604..e47436566b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -33,10 +33,10 @@ body: - area:error - area:exception - area:faas + - area:gen-ai - area:host - area:http - area:k8s - - area:llm - area:messaging - area:network - area:oci diff --git a/.github/ISSUE_TEMPLATE/change_proposal.yaml b/.github/ISSUE_TEMPLATE/change_proposal.yaml index a70efbd965..13d3ffee93 100644 --- a/.github/ISSUE_TEMPLATE/change_proposal.yaml +++ b/.github/ISSUE_TEMPLATE/change_proposal.yaml @@ -26,10 +26,10 @@ body: - area:error - area:exception - area:faas + - area:gen-ai - area:host - area:http - area:k8s - - area:llm - area:messaging - area:network - area:oci diff --git a/.github/ISSUE_TEMPLATE/new-conventions.yaml b/.github/ISSUE_TEMPLATE/new-conventions.yaml index 84f8d9d03f..78b17633ea 100644 --- a/.github/ISSUE_TEMPLATE/new-conventions.yaml +++ b/.github/ISSUE_TEMPLATE/new-conventions.yaml @@ -35,6 +35,7 @@ body: - area:error - area:exception - area:faas + - area:gen-ai - area:host - area:http - area:k8s diff --git a/docs/attributes-registry/llm.md b/docs/attributes-registry/gen-ai.md similarity index 100% rename from docs/attributes-registry/llm.md rename to docs/attributes-registry/gen-ai.md diff --git a/docs/ai/README.md b/docs/gen-ai/README.md similarity index 100% rename from docs/ai/README.md rename to docs/gen-ai/README.md diff --git a/docs/ai/llm-spans.md b/docs/gen-ai/llm-spans.md similarity index 62% rename from docs/ai/llm-spans.md rename to docs/gen-ai/llm-spans.md index 9a58096ed0..8fea1143ed 100644 --- a/docs/ai/llm-spans.md +++ b/docs/gen-ai/llm-spans.md @@ -38,16 +38,16 @@ These attributes track input data and metadata for a request to an LLM. 
Each att | Attribute | Type | Description | Examples | Requirement Level | |---|---|---|---|---| -| [`gen_ai.llm.request.max_tokens`](../attributes-registry/llm.md) | int | The maximum number of tokens the LLM generates for a request. | `100` | Recommended | -| [`gen_ai.llm.request.model`](../attributes-registry/llm.md) | string | The name of the LLM a request is being made to. [1] | `gpt-4` | Required | -| [`gen_ai.llm.request.temperature`](../attributes-registry/llm.md) | double | The temperature setting for the LLM request. | `0.0` | Recommended | -| [`gen_ai.llm.request.top_p`](../attributes-registry/llm.md) | double | The top_p sampling setting for the LLM request. | `1.0` | Recommended | -| [`gen_ai.llm.response.finish_reason`](../attributes-registry/llm.md) | string[] | Array of reasons the model stopped generating tokens, corresponding to each generation received. | `[['stop']]` | Recommended | -| [`gen_ai.llm.response.id`](../attributes-registry/llm.md) | string | The unique identifier for the completion. | `chatcmpl-123` | Recommended | -| [`gen_ai.llm.response.model`](../attributes-registry/llm.md) | string | The name of the LLM a response is being made to. [2] | `gpt-4-0613` | Required | -| [`gen_ai.llm.system`](../attributes-registry/llm.md) | string | The name of the LLM foundation model vendor, if applicable. [3] | `openai` | Recommended | -| [`gen_ai.llm.usage.completion_tokens`](../attributes-registry/llm.md) | int | The number of tokens used in the LLM response (completion). | `180` | Recommended | -| [`gen_ai.llm.usage.prompt_tokens`](../attributes-registry/llm.md) | int | The number of tokens used in the LLM prompt. | `100` | Recommended | +| [`gen_ai.llm.request.max_tokens`](../attributes-registry/gen-ai.md) | int | The maximum number of tokens the LLM generates for a request. | `100` | Recommended | +| [`gen_ai.llm.request.model`](../attributes-registry/gen-ai.md) | string | The name of the LLM a request is being made to. 
[1] | `gpt-4` | Required | +| [`gen_ai.llm.request.temperature`](../attributes-registry/gen-ai.md) | double | The temperature setting for the LLM request. | `0.0` | Recommended | +| [`gen_ai.llm.request.top_p`](../attributes-registry/gen-ai.md) | double | The top_p sampling setting for the LLM request. | `1.0` | Recommended | +| [`gen_ai.llm.response.finish_reason`](../attributes-registry/gen-ai.md) | string[] | Array of reasons the model stopped generating tokens, corresponding to each generation received. | `['stop']` | Recommended | +| [`gen_ai.llm.response.id`](../attributes-registry/gen-ai.md) | string | The unique identifier for the completion. | `chatcmpl-123` | Recommended | +| [`gen_ai.llm.response.model`](../attributes-registry/gen-ai.md) | string | The name of the LLM a response was generated from. [2] | `gpt-4-0613` | Required | +| [`gen_ai.llm.system`](../attributes-registry/gen-ai.md) | string | The name of the LLM foundation model vendor, if applicable. [3] | `openai` | Recommended | +| [`gen_ai.llm.usage.completion_tokens`](../attributes-registry/gen-ai.md) | int | The number of tokens used in the LLM response (completion). | `180` | Recommended | +| [`gen_ai.llm.usage.prompt_tokens`](../attributes-registry/gen-ai.md) | int | The number of tokens used in the LLM prompt. | `100` | Recommended | **[1]:** The name of the LLM a request is being made to. If the LLM is supplied by a vendor, then the value must be the exact name of the model requested. If the LLM is a fine-tuned custom model, the value should have a more specific name than the base model that's been fine-tuned. @@ -65,7 +65,7 @@ The event name MUST be `gen_ai.llm.content.prompt`. | Attribute | Type | Description | Examples | Requirement Level | |---|---|---|---|---| -| [`gen_ai.llm.prompt`](../attributes-registry/llm.md) | string | The full prompt sent to an LLM, as a stringified JSON in OpenAI's format. 
[1] | `[{'role': 'user', 'content': 'What is the capital of France?'}]` | Recommended | +| [`gen_ai.llm.prompt`](../attributes-registry/gen-ai.md) | string | The full prompt sent to an LLM, as a stringified JSON in OpenAI's format. [1] | `[{'role': 'user', 'content': 'What is the capital of France?'}]` | Recommended | **[1]:** The full prompt sent to an LLM in a request, structured as a JSON in OpenAI's format. @@ -75,7 +75,7 @@ The event name MUST be `gen_ai.llm.content.completion`. | Attribute | Type | Description | Examples | Requirement Level | |---|---|---|---|---| -| [`gen_ai.llm.completion`](../attributes-registry/llm.md) | string | The full response received from the LLM, as a stringified JSON in OpenAI's format. [1] | `[{'role': 'assistant', 'content': 'The capital of France is Paris.'}]` | Recommended | +| [`gen_ai.llm.completion`](../attributes-registry/gen-ai.md) | string | The full response received from the LLM, as a stringified JSON in OpenAI's format. [1] | `[{'role': 'assistant', 'content': 'The capital of France is Paris.'}]` | Recommended | **[1]:** The full response from an LLM, structured as a JSON in OpenAI's format. diff --git a/model/registry/llm.yaml b/model/registry/gen-ai.yaml similarity index 100% rename from model/registry/llm.yaml rename to model/registry/gen-ai.yaml diff --git a/model/trace/llm.yaml b/model/trace/gen-ai.yaml similarity index 100% rename from model/trace/llm.yaml rename to model/trace/gen-ai.yaml