From e39b5a6f6a2b58ef461aab5cc306595a15f1ebc4 Mon Sep 17 00:00:00 2001 From: darshit-s3 <119623510+darshit-s3@users.noreply.github.com> Date: Thu, 29 Aug 2024 21:10:08 +0530 Subject: [PATCH] feat: adding vertexai tools tracing support (#172) --- CHANGELOG.md | 7 +++ package-lock.json | 4 +- package.json | 2 +- src/examples/vertexai/basic.ts | 86 ++++++++++++++++++++++++++- src/instrumentation/vertexai/patch.ts | 81 ++++++++++++++++++++++--- src/instrumentation/vertexai/types.ts | 1 + 6 files changed, 167 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03730af5..c0f69e3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # @langtrase/typescript-sdk + +## 5.3.2 + +### Patch Changes + +- Add Vertex AI tools and function tracing support + ## 5.3.1 ### Patch Changes diff --git a/package-lock.json b/package-lock.json index c7ba4c6e..c06cdf84 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@langtrase/typescript-sdk", - "version": "5.3.0", + "version": "5.3.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@langtrase/typescript-sdk", - "version": "5.3.0", + "version": "5.3.2", "license": "Apache-2.0", "dependencies": { "@langtrase/trace-attributes": "7.3.0", diff --git a/package.json b/package.json index a7690c85..3d5dfc79 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@langtrase/typescript-sdk", - "version": "5.3.1", + "version": "5.3.2", "description": "A typescript SDK for Langtrace", "main": "dist/index.js", "types": "dist/index.d.ts", diff --git a/src/examples/vertexai/basic.ts b/src/examples/vertexai/basic.ts index fac3ee81..4e6a619f 100644 --- a/src/examples/vertexai/basic.ts +++ b/src/examples/vertexai/basic.ts @@ -1,6 +1,6 @@ import { init } from '@langtrace-init/init' import dotenv from 'dotenv' -import { VertexAI } from '@google-cloud/vertexai' +import { VertexAI, FunctionDeclarationSchemaType } from '@google-cloud/vertexai' 
dotenv.config() init({ batch: false, write_spans_to_console: true }) @@ -13,6 +13,38 @@ const vertexAI = new VertexAI({ project, location }) const generativeModel = vertexAI.getGenerativeModel({ model: textModel }) +const functionDeclarations = [ + { + functionDeclarations: [ + { + name: 'get_current_weather', + description: 'get weather in a given location', + parameters: { + type: FunctionDeclarationSchemaType.OBJECT, + properties: { + location: { type: FunctionDeclarationSchemaType.STRING }, + unit: { + type: FunctionDeclarationSchemaType.STRING, + enum: ['celsius', 'fahrenheit'] + } + }, + required: ['location'] + } + } + ] + } +] + +const functionResponseParts = [ + { + functionResponse: { + name: 'get_current_weather', + response: + { name: 'get_current_weather', content: { weather: 'super nice' } } + } + } +] + export const basicVertexAIChat = async (): Promise => { const request = { contents: [{ role: 'user', parts: [{ text: 'How are you doing today?' }] }] } const result = await generativeModel.generateContent(request) @@ -65,11 +97,59 @@ export const basicVertexAIStartChatStream = async (): Promise => { for await (const item of result.stream) { const text = item.candidates?.[0]?.content?.parts?.[0]?.text if (text === undefined || text === null) { - console.log('Stream chunk: ', text) - } else { console.log('Stream chunk: No text available') + } else { + console.log('Stream chunk: ', text) } } const aggregatedResponse = await result.response console.log('Aggregated response: ', JSON.stringify(aggregatedResponse)) } + +export const basicVertexAIStartChatWithToolRequest = async (): Promise => { + const request = { + contents: [ + { role: 'user', parts: [{ text: 'What is the weather in Boston?' 
}] }, + { role: 'model', parts: [{ functionCall: { name: 'get_current_weather', args: { location: 'Boston' } } }] }, + { role: 'user', parts: functionResponseParts } + ], + tools: functionDeclarations + } + const streamingResult = + await generativeModel.generateContentStream(request) + for await (const item of streamingResult.stream) { + if (item?.candidates !== undefined) { + console.log(item.candidates[0]) + } + } +} + +export const basicVertexAIStartChatWithToolResponse = async (): Promise => { + // Create a chat session and pass your function declarations + const chat = generativeModel.startChat({ tools: functionDeclarations }) + + const chatInput1 = 'What is the weather in Boston?' + + // This should include a functionCall response from the model + const streamingResult1 = await chat.sendMessageStream(chatInput1) + for await (const item of streamingResult1.stream) { + if (item?.candidates !== undefined) { + console.log(item.candidates[0]) + } + } + const response1 = await streamingResult1.response + console.log('first aggregated response: ', JSON.stringify(response1)) + + // Send a follow up message with a FunctionResponse + const streamingResult2 = await chat.sendMessageStream(functionResponseParts) + for await (const item of streamingResult2.stream) { + if (item?.candidates !== undefined) { + console.log(item.candidates[0]) + } + } + + // This should include a text response from the model using the response content + // provided above + const response2 = await streamingResult2.response + console.log('second aggregated response: ', JSON.stringify(response2)) +} diff --git a/src/instrumentation/vertexai/patch.ts b/src/instrumentation/vertexai/patch.ts index c4fbcc35..0834793b 100644 --- a/src/instrumentation/vertexai/patch.ts +++ b/src/instrumentation/vertexai/patch.ts @@ -53,19 +53,62 @@ export function generateContentPatch ( const serviceProvider = Vendors.VERTEXAI const customAttributes = context.active().getValue(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY) 
?? {} - const prompts = args.flatMap((arg: string | { contents: CandidateContent[] }) => { - if (typeof arg === 'string') { + let argTools: any[] = [] + const prompts = args.flatMap((arg: string | { contents?: CandidateContent[], tools?: any, functionResponse?: any }) => { + if (Array.isArray(arg)) { + // Handle the case where `arg` is an array (like [ { functionResponse: ... } ]) + return arg.flatMap(innerArg => { + if (Array.isArray(innerArg.tools)) argTools = argTools.concat(innerArg.tools) + if (innerArg.functionResponse != null) { + return [{ role: 'model', content: JSON.stringify(innerArg.functionResponse) }] + } else if (innerArg.contents != null) { + return innerArg.contents.map((content: CandidateContent) => ({ + role: content.role, + content: content.parts.map((part: CandidateContentPart) => { + if (typeof part.text === 'string') { + return part.text + } else if ('functionCall' in part) { + return JSON.stringify((part as any).functionCall) + } else if (typeof part === 'object') { + return JSON.stringify(part) + } else { + return '' + } + }).join('') + })) + } else { + return [] + } + }) + } else if (typeof arg === 'string') { // Handle the case where `arg` is a string return [{ role: 'user', content: arg }] - } else { + } else if (arg.contents != null) { + if (Array.isArray(arg.tools)) argTools = argTools.concat(arg.tools) // Handle the case where `arg` has the `contents` structure return arg.contents.map(content => ({ role: content.role, - content: content.parts.map(part => part.text).join('') + content: content.parts.map((part: CandidateContentPart) => { + if (typeof part.text === 'string') { + return part.text + } else if ('functionCall' in part) { + return JSON.stringify((part as any).functionCall) + } else if (typeof part === 'object') { + return JSON.stringify(part) + } else { + return '' + } + }).join('') })) + } else if (arg.functionResponse != null) { + // Handle the case where `arg` has a `functionResponse` structure + return [{ role: 'model', 
content: JSON.stringify(arg.functionResponse) }] + } else { + return [] } }) + const allTools = argTools.concat(this?.tools ?? []) const attributes: LLMSpanAttributes = { 'langtrace.sdk.name': sdkName, 'langtrace.service.name': serviceProvider, @@ -73,9 +116,20 @@ export function generateContentPatch ( 'gen_ai.operation.name': 'chat', 'langtrace.service.version': version, 'langtrace.version': langtraceVersion, - 'url.full': '', - 'url.path': this?.publisherModelEndpoint, - 'gen_ai.request.model': this?.model, + 'url.full': this?.apiEndpoint, + 'url.path': this?.publisherModelEndpoint ?? this?.resourcePath ?? undefined, + 'gen_ai.request.model': (() => { + if (this?.model !== undefined && this.model !== null) { + return this.model + } + if (typeof this?.resourcePath === 'string') { + return this.resourcePath.split('/').pop() + } + if (typeof this?.publisherModelEndpoint === 'string') { + return this.publisherModelEndpoint.split('/').pop() + } + return undefined + })(), 'http.max.retries': this?._client?.maxRetries, 'http.timeout': this?._client?.timeout, 'gen_ai.request.temperature': this?.generationConfig?.temperature, @@ -86,6 +140,7 @@ export function generateContentPatch ( 'gen_ai.request.frequency_penalty': this?.generationConfig?.frequencyPenalty, 'gen_ai.request.presence_penalty': this?.generationConfig?.presencePenalty, 'gen_ai.request.seed': this?.generationConfig?.seed, + 'gen_ai.request.tools': allTools.length > 0 ? 
JSON.stringify(allTools) : undefined, ...customAttributes } @@ -179,7 +234,17 @@ async function * handleStreamResponse ( const { content } = chunk.candidates.map((candidate: Candidate) => { return { role: candidate.content.role, - content: candidate.content.parts.map((part: CandidateContentPart) => part.text).join('') + content: candidate.content.parts.map((part: CandidateContentPart) => { + if (typeof part.text === 'string') { + return part.text + } else if ('functionCall' in part) { + return JSON.stringify(part.functionCall) + } else if (typeof part === 'object') { + return JSON.stringify(part) + } else { + return '' + } + }).join('') } })[0] const tokenCount = estimateTokens(content) diff --git a/src/instrumentation/vertexai/types.ts b/src/instrumentation/vertexai/types.ts index 3037e5ed..dc11065c 100644 --- a/src/instrumentation/vertexai/types.ts +++ b/src/instrumentation/vertexai/types.ts @@ -1,5 +1,6 @@ export interface CandidateContentPart { text: string + functionCall: any } export interface CandidateContent {