feat: adding vertexai tools tracing support (#172)
darshit-s3 authored Aug 29, 2024
1 parent 7202a24 commit e39b5a6
Showing 6 changed files with 167 additions and 14 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,12 @@
# @langtrase/typescript-sdk


## 5.3.2

### Patch Changes

- Add Vertex AI tools and function tracing support

## 5.3.1

### Patch Changes
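In practice, this patch means tool definitions and function-call traffic sent to Vertex AI are now captured on spans. A minimal sketch of traced usage, modeled on src/examples/vertexai/basic.ts below — the project, location, and model values here are placeholder assumptions, since the example file reads them from the environment:

import { init } from '@langtrace-init/init'
import { VertexAI, FunctionDeclarationSchemaType } from '@google-cloud/vertexai'

init({ batch: false, write_spans_to_console: true })

// Placeholder project/location/model (assumptions, not from the commit)
const vertexAI = new VertexAI({ project: 'my-project', location: 'us-central1' })
const model = vertexAI.getGenerativeModel({ model: 'gemini-1.0-pro' })

const tools = [{
  functionDeclarations: [{
    name: 'get_current_weather',
    description: 'get weather in a given location',
    parameters: {
      type: FunctionDeclarationSchemaType.OBJECT,
      properties: { location: { type: FunctionDeclarationSchemaType.STRING } },
      required: ['location']
    }
  }]
}]

// The `tools` array is what the new instrumentation serializes into the
// `gen_ai.request.tools` span attribute (see src/instrumentation/vertexai/patch.ts below).
void model.generateContent({
  contents: [{ role: 'user', parts: [{ text: 'What is the weather in Boston?' }] }],
  tools
})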
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "@langtrase/typescript-sdk",
"version": "5.3.1",
"version": "5.3.2",
"description": "A typescript SDK for Langtrace",
"main": "dist/index.js",
"types": "dist/index.d.ts",
86 changes: 83 additions & 3 deletions src/examples/vertexai/basic.ts
@@ -1,6 +1,6 @@
import { init } from '@langtrace-init/init'
import dotenv from 'dotenv'
-import { VertexAI } from '@google-cloud/vertexai'
+import { VertexAI, FunctionDeclarationSchemaType } from '@google-cloud/vertexai'

dotenv.config()
init({ batch: false, write_spans_to_console: true })
@@ -13,6 +13,38 @@ const vertexAI = new VertexAI({ project, location })

const generativeModel = vertexAI.getGenerativeModel({ model: textModel })

const functionDeclarations = [
{
functionDeclarations: [
{
name: 'get_current_weather',
description: 'get weather in a given location',
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
location: { type: FunctionDeclarationSchemaType.STRING },
unit: {
type: FunctionDeclarationSchemaType.STRING,
enum: ['celsius', 'fahrenheit']
}
},
required: ['location']
}
}
]
}
]

const functionResponseParts = [
{
functionResponse: {
name: 'get_current_weather',
response:
{ name: 'get_current_weather', content: { weather: 'super nice' } }
}
}
]
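// Together these fixtures model Vertex AI's function-calling round trip: the
// request declares the tools, the model responds with a `functionCall` part,
// and the client returns a `functionResponse` part that the model uses to
// ground its final text answer.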

export const basicVertexAIChat = async (): Promise<void> => {
const request = { contents: [{ role: 'user', parts: [{ text: 'How are you doing today?' }] }] }
const result = await generativeModel.generateContent(request)
@@ -65,11 +97,59 @@ export const basicVertexAIStartChatStream = async (): Promise<void> => {
for await (const item of result.stream) {
const text = item.candidates?.[0]?.content?.parts?.[0]?.text
if (text === undefined || text === null) {
-      console.log('Stream chunk: ', text)
-    } else {
      console.log('Stream chunk: No text available')
+    } else {
+      console.log('Stream chunk: ', text)
}
}
const aggregatedResponse = await result.response
console.log('Aggregated response: ', JSON.stringify(aggregatedResponse))
}

export const basicVertexAIStartChatWithToolRequest = async (): Promise<void> => {
const request = {
contents: [
{ role: 'user', parts: [{ text: 'What is the weather in Boston?' }] },
{ role: 'model', parts: [{ functionCall: { name: 'get_current_weather', args: { location: 'Boston' } } }] },
{ role: 'user', parts: functionResponseParts }
],
tools: functionDeclarations
}
const streamingResult =
await generativeModel.generateContentStream(request)
for await (const item of streamingResult.stream) {
if (item?.candidates !== undefined) {
console.log(item.candidates[0])
}
}
}

export const basicVertexAIStartChatWithToolResponse = async (): Promise<void> => {
// Create a chat session and pass your function declarations
const chat = generativeModel.startChat({ tools: functionDeclarations })

const chatInput1 = 'What is the weather in Boston?'

// This should include a functionCall response from the model
const streamingResult1 = await chat.sendMessageStream(chatInput1)
for await (const item of streamingResult1.stream) {
if (item?.candidates !== undefined) {
console.log(item.candidates[0])
}
}
const response1 = await streamingResult1.response
console.log('first aggregated response: ', JSON.stringify(response1))

// Send a follow up message with a FunctionResponse
const streamingResult2 = await chat.sendMessageStream(functionResponseParts)
for await (const item of streamingResult2.stream) {
if (item?.candidates !== undefined) {
console.log(item.candidates[0])
}
}

// This should include a text response from the model using the response content
// provided above
const response2 = await streamingResult2.response
console.log('second aggregated response: ', JSON.stringify(response2))
}
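None of these exported helpers are invoked in the file itself; a hypothetical runner (the import path here is an assumption, not part of the commit) could exercise the two tool flows:

import {
  basicVertexAIStartChatWithToolRequest,
  basicVertexAIStartChatWithToolResponse
} from './basic' // hypothetical entry point importing the example file above

void (async () => {
  await basicVertexAIStartChatWithToolRequest()
  await basicVertexAIStartChatWithToolResponse()
})()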
81 changes: 73 additions & 8 deletions src/instrumentation/vertexai/patch.ts
@@ -53,29 +53,83 @@ export function generateContentPatch (
const serviceProvider = Vendors.VERTEXAI
const customAttributes = context.active().getValue(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY) ?? {}

-  const prompts = args.flatMap((arg: string | { contents: CandidateContent[] }) => {
-    if (typeof arg === 'string') {
+  let argTools: any[] = []
+  const prompts = args.flatMap((arg: string | { contents?: CandidateContent[], tools?: any, functionResponse?: any }) => {
+    if (Array.isArray(arg)) {
// Handle the case where `arg` is an array (like [ { functionResponse: ... } ])
return arg.flatMap(innerArg => {
if (Array.isArray(innerArg.tools)) argTools = argTools.concat(innerArg.tools)
if (innerArg.functionResponse != null) {
return [{ role: 'model', content: JSON.stringify(innerArg.functionResponse) }]
} else if (innerArg.contents != null) {
return innerArg.contents.map((content: CandidateContent) => ({
role: content.role,
content: content.parts.map((part: CandidateContentPart) => {
if (typeof part.text === 'string') {
return part.text
} else if ('functionCall' in part) {
return JSON.stringify((part as any).functionCall)
} else if (typeof part === 'object') {
return JSON.stringify(part)
} else {
return ''
}
}).join('')
}))
} else {
return []
}
})
} else if (typeof arg === 'string') {
// Handle the case where `arg` is a string
return [{ role: 'user', content: arg }]
-    } else {
+    } else if (arg.contents != null) {
if (Array.isArray(arg.tools)) argTools = argTools.concat(arg.tools)
// Handle the case where `arg` has the `contents` structure
return arg.contents.map(content => ({
role: content.role,
-        content: content.parts.map(part => part.text).join('')
+        content: content.parts.map((part: CandidateContentPart) => {
if (typeof part.text === 'string') {
return part.text
} else if ('functionCall' in part) {
return JSON.stringify((part as any).functionCall)
} else if (typeof part === 'object') {
return JSON.stringify(part)
} else {
return ''
}
}).join('')
}))
} else if (arg.functionResponse != null) {
// Handle the case where `arg` has a `functionResponse` structure
return [{ role: 'model', content: JSON.stringify(arg.functionResponse) }]
} else {
return []
}
})

const allTools = argTools.concat(this?.tools ?? [])
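  // Tools can arrive either inside individual request args (collected into
  // argTools above) or on the model instance (`this.tools`, e.g. when passed
  // to startChat); both sources are merged onto the span.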
const attributes: LLMSpanAttributes = {
'langtrace.sdk.name': sdkName,
'langtrace.service.name': serviceProvider,
'langtrace.service.type': 'llm',
'gen_ai.operation.name': 'chat',
'langtrace.service.version': version,
'langtrace.version': langtraceVersion,
-    'url.full': '',
-    'url.path': this?.publisherModelEndpoint,
-    'gen_ai.request.model': this?.model,
+    'url.full': this?.apiEndpoint,
+    'url.path': this?.publisherModelEndpoint ?? this?.resourcePath ?? undefined,
+    'gen_ai.request.model': (() => {
if (this?.model !== undefined && this.model !== null) {
return this.model
}
if (typeof this?.resourcePath === 'string') {
return this.resourcePath.split('/').pop()
}
if (typeof this?.publisherModelEndpoint === 'string') {
return this.publisherModelEndpoint.split('/').pop()
}
return undefined
})(),
'http.max.retries': this?._client?.maxRetries,
'http.timeout': this?._client?.timeout,
'gen_ai.request.temperature': this?.generationConfig?.temperature,
@@ -86,6 +140,7 @@
'gen_ai.request.frequency_penalty': this?.generationConfig?.frequencyPenalty,
'gen_ai.request.presence_penalty': this?.generationConfig?.presencePenalty,
'gen_ai.request.seed': this?.generationConfig?.seed,
'gen_ai.request.tools': allTools.length > 0 ? JSON.stringify(allTools) : undefined,
...customAttributes
}

@@ -179,7 +234,17 @@ async function * handleStreamResponse (
const { content } = chunk.candidates.map((candidate: Candidate) => {
return {
role: candidate.content.role,
-      content: candidate.content.parts.map((part: CandidateContentPart) => part.text).join('')
+      content: candidate.content.parts.map((part: CandidateContentPart) => {
if (typeof part.text === 'string') {
return part.text
} else if ('functionCall' in part) {
return JSON.stringify(part.functionCall)
} else if (typeof part === 'object') {
return JSON.stringify(part)
} else {
return ''
}
}).join('')
}
})[0]
const tokenCount = estimateTokens(content)
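Condensed, the patched extractor now normalizes three request shapes into role/content pairs. A self-contained sketch of that logic — simplified from generateContentPatch above, not the literal implementation:

type Part = { text?: string, functionCall?: unknown }
type Content = { role: string, parts: Part[] }

const partToString = (part: Part): string => {
  if (typeof part.text === 'string') return part.text
  if (part.functionCall !== undefined) return JSON.stringify(part.functionCall)
  return JSON.stringify(part)
}

const normalize = (
  arg: string | { contents?: Content[] } | Array<{ functionResponse?: unknown }>
): Array<{ role: string, content: string }> => {
  // 1. Bare string prompt
  if (typeof arg === 'string') return [{ role: 'user', content: arg }]
  // 2. Array args, e.g. the functionResponseParts sent via chat.sendMessageStream
  if (Array.isArray(arg)) {
    return arg.flatMap(a => a.functionResponse != null
      ? [{ role: 'model', content: JSON.stringify(a.functionResponse) }]
      : [])
  }
  // 3. { contents: [...] } requests: join text and serialized functionCall parts
  return (arg.contents ?? []).map(c => ({
    role: c.role,
    content: c.parts.map(partToString).join('')
  }))
}

console.log(normalize('How are you doing today?'))
// -> [ { role: 'user', content: 'How are you doing today?' } ]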
1 change: 1 addition & 0 deletions src/instrumentation/vertexai/types.ts
@@ -1,5 +1,6 @@
export interface CandidateContentPart {
text: string
functionCall: any
}

export interface CandidateContent {
