From 98ca4a90066c94303677b279042bf5c75d38d154 Mon Sep 17 00:00:00 2001
From: Akihiko Kuroda
Date: Tue, 12 Nov 2024 19:46:55 -0500
Subject: [PATCH] feat(adapters): add GCP VertexAI adapter

Signed-off-by: Akihiko Kuroda
---
 .env.template                            |   6 +
 docs/llms.md                             |   1 +
 package.json                             |  12 ++
 src/adapters/vertexai/chat.ts            | 162 ++++++++++++++++++++++
 src/adapters/vertexai/llm.test.ts        |  53 +++++++
 src/adapters/vertexai/llm.ts             | 169 +++++++++++++++++++++++
 src/adapters/vertexai/utils.ts           |  51 +++++++
 tests/e2e/adapters/vertexai/chat.test.ts |  64 +++++++++
 tests/e2e/adapters/vertexai/llm.test.ts  |  54 ++++++++
 tests/e2e/utils.ts                       |   2 +
 yarn.lock                                |  36 +++++
 11 files changed, 610 insertions(+)
 create mode 100644 src/adapters/vertexai/chat.ts
 create mode 100644 src/adapters/vertexai/llm.test.ts
 create mode 100644 src/adapters/vertexai/llm.ts
 create mode 100644 src/adapters/vertexai/utils.ts
 create mode 100644 tests/e2e/adapters/vertexai/chat.test.ts
 create mode 100644 tests/e2e/adapters/vertexai/llm.test.ts

diff --git a/.env.template b/.env.template
index 46ebf9d6..a3e3933f 100644
--- a/.env.template
+++ b/.env.template
@@ -20,6 +20,11 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false"
 # For Groq LLM Adapter
 # GROQ_API_KEY=
 
+# For GCP VertexAI Adapter
+# GOOGLE_APPLICATION_CREDENTIALS=
+# GCP_VERTEXAI_PROJECT=
+# GCP_VERTEXAI_LOCATION=
+
 # Tools
 # CODE_INTERPRETER_URL=http://127.0.0.1:50051
 
@@ -30,3 +35,4 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false"
 # For Elasticsearch Tool
 # ELASTICSEARCH_NODE=
 # ELASTICSEARCH_API_KEY=
+
diff --git a/docs/llms.md b/docs/llms.md
index 1d4a0ae6..1fb667e3 100644
--- a/docs/llms.md
+++ b/docs/llms.md
@@ -20,6 +20,7 @@ To unify differences between various APIs, the framework defines a common interf
 | `OpenAI` | ❌ | ✅ | ⚠️ (JSON schema only) |
 | `LangChain` | ⚠️ (depends on a provider) | ⚠️ (depends on a provider) | ❌ |
 | `Groq` | ❌ | ✅ | ⚠️ (JSON object only) |
+| `VertexAI` | ✅ | ✅ | ⚠️ (JSON only) |
 | `BAM (Internal)` | ✅ | ⚠️ (model specific template must be provided) | ✅ |
 | ➕ [Request](https://github.com/i-am-bee/bee-agent-framework/discussions) | | | |
diff --git a/package.json b/package.json
index f353ac65..f9a153f8 100644
--- a/package.json
+++ b/package.json
@@ -194,12 +194,14 @@
   },
   "peerDependencies": {
     "@elastic/elasticsearch": "^8.0.0",
+    "@google-cloud/vertexai": "*",
     "@googleapis/customsearch": "^3.2.0",
     "@grpc/grpc-js": "^1.11.3",
     "@grpc/proto-loader": "^0.7.13",
     "@ibm-generative-ai/node-sdk": "~3.2.4",
     "@langchain/community": ">=0.2.28",
     "@langchain/core": ">=0.2.27",
+    "google-auth-library": "*",
     "groq-sdk": "^0.7.0",
     "ollama": "^0.5.8",
     "openai": "^4.67.3",
@@ -210,6 +212,9 @@
     "@elastic/elasticsearch": {
       "optional": true
     },
+    "@google-cloud/vertexai": {
+      "optional": true
+    },
     "@googleapis/customsearch": {
       "optional": true
     },
@@ -228,6 +233,9 @@
     "@langchain/core": {
       "optional": true
     },
+    "google-auth-library": {
+      "optional": true
+    },
     "groq-sdk": {
       "optional": true
     },
@@ -309,5 +317,9 @@
     "vite-tsconfig-paths": "^5.0.1",
     "vitest": "^2.1.3",
     "yaml": "^2.6.0"
+  },
+  "optionalDependencies": {
+    "@google-cloud/vertexai": "^1.9.0",
+    "google-auth-library": "^9.15.0"
   }
 }
diff --git a/src/adapters/vertexai/chat.ts b/src/adapters/vertexai/chat.ts
new file mode 100644
index 00000000..1de823eb
--- /dev/null
+++ b/src/adapters/vertexai/chat.ts
@@ -0,0 +1,162 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {
+  AsyncStream,
+  BaseLLMTokenizeOutput,
+  ExecutionOptions,
+  GenerateCallbacks,
+  GenerateOptions,
+  LLMCache,
+  LLMMeta,
+} from "@/llms/base.js";
+import { shallowCopy } from "@/serializer/utils.js";
+import type { GetRunContext } from "@/context.js";
+import { Emitter } from "@/emitter/emitter.js";
+import { VertexAI, GenerativeModel } from "@google-cloud/vertexai";
+import { ChatLLM, ChatLLMOutput } from "@/llms/chat.js";
+import { BaseMessage, Role } from "@/llms/primitives/message.js";
+import { signalRace } from "@/internals/helpers/promise.js";
+import { processContentResponse, registerGenerativeModel } from "./utils.js";
+
+export class VertexAIChatLLMOutput extends ChatLLMOutput {
+  public readonly chunks: BaseMessage[] = [];
+
+  constructor(chunk: BaseMessage) {
+    super();
+    this.chunks.push(chunk);
+  }
+
+  get messages() {
+    return this.chunks;
+  }
+
+  merge(other: VertexAIChatLLMOutput): void {
+    this.chunks.push(...other.chunks);
+  }
+
+  getTextContent(): string {
+    return this.chunks.map((result) => result.text).join("");
+  }
+
+  toString() {
+    return this.getTextContent();
+  }
+
+  createSnapshot() {
+    return { chunks: shallowCopy(this.chunks) };
+  }
+
+  loadSnapshot(snapshot: ReturnType<typeof this.createSnapshot>): void {
+    Object.assign(this, snapshot);
+  }
+}
+
+type VertexAIGenerateOptions = GenerateOptions;
+
+export interface VertexAIChatLLMInput {
+  modelId: string;
+  project: string;
+  location: string;
+  client?: GenerativeModel;
+  executionOptions?: ExecutionOptions;
+  cache?: LLMCache;
+  parameters?: Record<string, any>;
+}
+
+export class VertexAIChatLLM extends ChatLLM<VertexAIChatLLMOutput> {
+  public readonly emitter: Emitter<GenerateCallbacks> = Emitter.root.child({
+    namespace: ["vertexai", "chat_llm"],
+    creator: this,
+  });
+
+  protected client: GenerativeModel;
+
+  constructor(protected readonly input: VertexAIChatLLMInput) {
+    super(input.modelId, input.executionOptions, input.cache);
+    const vertexAI = new VertexAI({ project: input.project, location: input.location });
+    this.client =
+      input.client ??
+      vertexAI.getGenerativeModel({
+        model: input.modelId,
+      });
+  }
+
+  static {
+    this.register();
+    registerGenerativeModel();
+  }
+
+  async meta(): Promise<LLMMeta> {
+    return { tokenLimit: Infinity };
+  }
+
+  async tokenize(input: BaseMessage[]): Promise<BaseLLMTokenizeOutput> {
+    const response = await this.client.countTokens({
+      contents: input.map((msg) => ({ parts: [{ text: msg.text }], role: msg.role })),
+    });
+    return {
+      tokensCount: response.totalTokens,
+    };
+  }
+
+  protected async _generate(
+    input: BaseMessage[],
+    options: VertexAIGenerateOptions,
+    run: GetRunContext<typeof this>,
+  ): Promise<VertexAIChatLLMOutput> {
+    const response = await signalRace(
+      () =>
+        this.client.generateContent({
+          contents: input.map((msg) => ({ parts: [{ text: msg.text }], role: msg.role })),
+        }),
+      run.signal,
+    );
+    const result = BaseMessage.of({
+      role: Role.ASSISTANT,
+      text: processContentResponse(response.response),
+    });
+    return new VertexAIChatLLMOutput(result);
+  }
+
+  protected async *_stream(
+    input: BaseMessage[],
+    options: VertexAIGenerateOptions | undefined,
+    run: GetRunContext<typeof this>,
+  ): AsyncStream<VertexAIChatLLMOutput> {
+    const chat = this.client.startChat();
+    const response = await chat.sendMessageStream(input.map((msg) => msg.text));
+    for await (const chunk of await response.stream) {
+      if (options?.signal?.aborted) {
+        break;
+      }
+      const result = BaseMessage.of({
+        role: Role.ASSISTANT,
+        text: processContentResponse(chunk),
+      });
+      yield new VertexAIChatLLMOutput(result);
+    }
+    run.signal.throwIfAborted();
+  }
+
+  createSnapshot() {
+    return {
+      ...super.createSnapshot(),
+      input: shallowCopy(this.input),
+      client: this.client,
+    };
+  }
+}
diff --git a/src/adapters/vertexai/llm.test.ts b/src/adapters/vertexai/llm.test.ts
new file mode 100644
index 00000000..22fc0ba5
--- /dev/null
+++ b/src/adapters/vertexai/llm.test.ts
@@ -0,0 +1,53 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { verifyDeserialization } from "@tests/e2e/utils.js";
+import { VertexAILLM } from "@/adapters/vertexai/llm.js";
+import { VertexAIChatLLM } from "@/adapters/vertexai/chat.js";
+
+describe("VertexAI LLM", () => {
+  const getInstance = () => {
+    return new VertexAILLM({
+      modelId: "gemini-1.5-flash-001",
+      location: "us-central1",
+      project: "dummy-project",
+    });
+  };
+
+  it("Serializes", async () => {
+    const instance = getInstance();
+    const serialized = instance.serialize();
+    const deserialized = VertexAILLM.fromSerialized(serialized);
+    verifyDeserialization(instance, deserialized);
+  });
+});
+
+describe("VertexAI ChatLLM", () => {
+  const getInstance = () => {
+    return new VertexAIChatLLM({
+      modelId: "gemini-1.5-flash-001",
+      location: "us-central1",
+      project: "dummy-project",
+    });
+  };
+
+  it("Serializes", async () => {
+    const instance = getInstance();
+    const serialized = instance.serialize();
+    const deserialized = VertexAIChatLLM.fromSerialized(serialized);
+    verifyDeserialization(instance, deserialized);
+  });
+});
diff --git a/src/adapters/vertexai/llm.ts b/src/adapters/vertexai/llm.ts
new file mode 100644
index 00000000..e1fb42ff
--- /dev/null
+++ b/src/adapters/vertexai/llm.ts
@@ -0,0 +1,169 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { LLM, LLMInput } from "@/llms/llm.js";
+import {
+  AsyncStream,
+  BaseLLMOutput,
+  BaseLLMTokenizeOutput,
+  ExecutionOptions,
+  GenerateCallbacks,
+  GenerateOptions,
+  LLMCache,
+  LLMMeta,
+} from "@/llms/base.js";
+import { shallowCopy } from "@/serializer/utils.js";
+import type { GetRunContext } from "@/context.js";
+import { Emitter } from "@/emitter/emitter.js";
+import { VertexAI, GenerativeModel, GetGenerativeModelParams } from "@google-cloud/vertexai";
+import { Role } from "@/llms/primitives/message.js";
+import { signalRace } from "@/internals/helpers/promise.js";
+import { processContentResponse, registerGenerativeModel } from "./utils.js";
+
+interface VertexAILLMChunk {
+  text: string;
+  metadata: Record<string, any>;
+}
+
+export class VertexAILLMOutput extends BaseLLMOutput {
+  public readonly chunks: VertexAILLMChunk[] = [];
+
+  constructor(chunk: VertexAILLMChunk) {
+    super();
+    this.chunks.push(chunk);
+  }
+
+  merge(other: VertexAILLMOutput): void {
+    this.chunks.push(...other.chunks);
+  }
+
+  getTextContent(): string {
+    return this.chunks.map((result) => result.text).join("");
+  }
+
+  toString(): string {
+    return this.getTextContent();
+  }
+
+  createSnapshot() {
+    return { chunks: shallowCopy(this.chunks) };
+  }
+
+  loadSnapshot(snapshot: ReturnType<typeof this.createSnapshot>): void {
+    Object.assign(this, snapshot);
+  }
+}
+
+export interface VertexAILLMParameters extends GenerateOptions {
+  [key: string]: any;
+  modelParams: GetGenerativeModelParams;
+}
+
+export interface VertexAIGenerateOptions extends GenerateOptions {
+  parameters?: VertexAILLMParameters;
+}
+
+export interface VertexAILLMInput {
+  modelId: string;
+  project: string;
+  location: string;
+  client?: GenerativeModel;
+  executionOptions?: ExecutionOptions;
+  cache?: LLMCache;
+  parameters?: Record<string, any>;
+}
+
+export class VertexAILLM extends LLM<VertexAILLMOutput, VertexAIGenerateOptions> {
+  public readonly emitter: Emitter<GenerateCallbacks> = Emitter.root.child({
+    namespace: ["vertexai", "llm"],
+    creator: this,
+  });
+
+  protected client: GenerativeModel;
+
+  constructor(protected readonly input: VertexAILLMInput) {
+    super(input.modelId, input.executionOptions, input.cache);
+    const vertexAI = new VertexAI({ project: input.project, location: input.location });
+    this.client =
+      input.client ??
+      vertexAI.getGenerativeModel({
+        model: input.modelId,
+      });
+  }
+
+  static {
+    this.register();
+    registerGenerativeModel();
+  }
+
+  async meta(): Promise<LLMMeta> {
+    return { tokenLimit: Infinity };
+  }
+
+  async tokenize(input: LLMInput): Promise<BaseLLMTokenizeOutput> {
+    const response = await this.client.countTokens({
+      contents: [{ parts: [{ text: input }], role: Role.USER }],
+    });
+    return {
+      tokensCount: response.totalTokens,
+    };
+  }
+
+  protected async _generate(
+    input: LLMInput,
+    options: VertexAIGenerateOptions,
+    run: GetRunContext<typeof this>,
+  ): Promise<VertexAILLMOutput> {
+    const responses = await signalRace(() => this.client.generateContent(input), run.signal);
+    const result: VertexAILLMChunk = {
+      text: processContentResponse(responses.response),
+      metadata: {},
+    };
+    return new VertexAILLMOutput(result);
+  }
+
+  protected async *_stream(
+    input: LLMInput,
+    options: VertexAIGenerateOptions | undefined,
+    run: GetRunContext<typeof this>,
+  ): AsyncStream<VertexAILLMOutput> {
+    const response = await this.client.generateContentStream(input);
+    for await (const chunk of await response.stream) {
+      if (options?.signal?.aborted) {
+        break;
+      }
+      const result: VertexAILLMChunk = {
+        text: processContentResponse(chunk),
+        metadata: {},
+      };
+      yield new VertexAILLMOutput(result);
+    }
+    run.signal.throwIfAborted();
+  }
+
+  createSnapshot() {
+    return {
+      ...super.createSnapshot(),
+      input: shallowCopy(this.input),
+      client: this.client,
+    };
+  }
+
+  loadSnapshot({ input, ...snapshot }: ReturnType<typeof this.createSnapshot>) {
+    super.loadSnapshot(snapshot);
+    Object.assign(this, { input });
+  }
+}
diff --git a/src/adapters/vertexai/utils.ts b/src/adapters/vertexai/utils.ts
new file mode 100644
index 00000000..9c35f034
--- /dev/null
+++ b/src/adapters/vertexai/utils.ts
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { Serializer } from "@/serializer/serializer.js";
+import { VertexAI, GenerativeModel } from "@google-cloud/vertexai";
+import { getPropStrict, getProp } from "@/internals/helpers/object.js";
+import { GenerateContentResponse } from "@google-cloud/vertexai";
+
+export function processContentResponse(response: GenerateContentResponse): string {
+  if (response.candidates != undefined) {
+    return response.candidates
+      .map((candidate) => candidate.content.parts.map((part) => part.text!))
+      .flat()
+      .join();
+  }
+  return "Empty";
+}
+
+export function registerGenerativeModel() {
+  Serializer.register(GenerativeModel, {
+    toPlain: (value) => ({
+      project: getPropStrict(value, "project"),
+      location: getPropStrict(value, "location"),
+      model: getPropStrict(value, "model"),
+      safetySettings: getPropStrict(value, "safetySettings"),
+      generationConfig: getPropStrict(value, "generationConfig"),
+      systemInstruction: getProp(value, ["systemInstruction"]),
+    }),
+    fromPlain: (value) => {
+      const vertexAI = new VertexAI({ project: value.project, location: value.location });
+      return vertexAI.getGenerativeModel({
+        model: value.model,
+        safetySettings: value.safetySettings,
+        generationConfig: value.generationConfig,
+      });
+    },
+  });
+}
diff --git a/tests/e2e/adapters/vertexai/chat.test.ts b/tests/e2e/adapters/vertexai/chat.test.ts
new file mode 100644
index 00000000..9f2dd982
--- /dev/null
+++ b/tests/e2e/adapters/vertexai/chat.test.ts
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { BaseMessage } from "@/llms/primitives/message.js";
+import { expect } from "vitest";
+import { VertexAIChatLLM } from "@/adapters/vertexai/chat.js";
+import { getEnv } from "@/internals/env.js";
+
+const project = getEnv("GCP_VERTEXAI_PROJECT");
+const location = getEnv("GCP_VERTEXAI_LOCATION");
+
+describe.runIf(Boolean(project && location && getEnv("GOOGLE_APPLICATION_CREDENTIALS")))(
+  "GCP Vertex AI",
+  () => {
+    const createChatLLM = () => {
+      return new VertexAIChatLLM({
+        modelId: "gemini-1.5-flash-001",
+        project: project ?? "dummy",
+        location: location ?? "us-central1",
+      });
+    };
+
+    it("Generates", async () => {
+      const conversation = [
+        BaseMessage.of({
+          role: "user",
+          text: `You are a helpful and respectful and honest assistant.
Your answer should be short and concise.`, + }), + ]; + const llm = createChatLLM(); + + for (const { question, answer } of [ + { question: `What is the coldest continent?`, answer: "arctica" }, + { question: "What is the most common typical animal that lives there?", answer: "penguin" }, + ]) { + conversation.push( + BaseMessage.of({ + role: "user", + text: question, + }), + ); + const response = await llm.generate(conversation); + + const newMessages = response.messages; + expect(newMessages).toHaveLength(1); + expect(newMessages[0].text.toLowerCase()).toContain(answer.toLowerCase()); + conversation.push(...newMessages); + } + }); + }, +); diff --git a/tests/e2e/adapters/vertexai/llm.test.ts b/tests/e2e/adapters/vertexai/llm.test.ts new file mode 100644 index 00000000..e7d6acd5 --- /dev/null +++ b/tests/e2e/adapters/vertexai/llm.test.ts @@ -0,0 +1,54 @@ +/** + * Copyright 2024 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { VertexAILLM, VertexAILLMOutput } from "@/adapters/vertexai/llm.js"; +import { getEnv } from "@/internals/env.js"; + +const project = getEnv("GCP_VERTEXAI_PROJECT"); +const location = getEnv("GCP_VERTEXAI_LOCATION"); + +describe.runIf(Boolean(project && location && getEnv("GOOGLE_APPLICATION_CREDENTIALS")))( + "GCP Vertex AI", + () => { + const createLLM = () => { + return new VertexAILLM({ + modelId: "gemini-1.5-flash-001", + project: project, + location: location, + }); + }; + + it("Meta", async () => { + const llm = createLLM(); + const response = await llm.meta(); + expect(response.tokenLimit).toBeGreaterThan(0); + }); + + it("Generates", async () => { + const llm = createLLM(); + const response = await llm.generate("Hello world!"); + expect(response).toBeInstanceOf(VertexAILLMOutput); + }); + + it("Streams", async () => { + const llm = createLLM(); + for await (const chunk of llm.stream("Hello world!")) { + expect(chunk).toBeInstanceOf(VertexAILLMOutput); + expect(chunk.toString()).toBeTruthy(); + } + }); + }, +); diff --git a/tests/e2e/utils.ts b/tests/e2e/utils.ts index 4a03f7b6..eaccaa18 100644 --- a/tests/e2e/utils.ts +++ b/tests/e2e/utils.ts @@ -32,6 +32,7 @@ import { Groq } from "groq-sdk"; import { customsearch_v1 } from "@googleapis/customsearch"; import { LangChainTool } from "@/adapters/langchain/tools.js"; import { Client as esClient } from "@elastic/elasticsearch"; +import { GenerativeModel } from "@google-cloud/vertexai"; interface CallbackOptions { required?: boolean; @@ -129,6 +130,7 @@ verifyDeserialization.ignoredClasses = [ RunContext, Emitter, esClient, + GenerativeModel, ] as ClassConstructor[]; verifyDeserialization.isIgnored = (key: string, value: unknown, parent?: any) => { if (verifyDeserialization.ignoredKeys.has(key)) { diff --git a/yarn.lock b/yarn.lock index 060d9257..cf17f26a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -783,6 +783,15 @@ __metadata: languageName: node linkType: hard +"@google-cloud/vertexai@npm:^1.9.0": + version: 1.9.0 + resolution: "@google-cloud/vertexai@npm:1.9.0" + dependencies: + 
google-auth-library: "npm:^9.1.0" + checksum: 10c0/6568fa105a180b4d27be31be92e6c71b9f8d018bb432631d63e0bb37cc299811895dbf237b9c560a1558de4f87b18882bdbb8e976ce43bf75bf80d998f395d76 + languageName: node + linkType: hard + "@googleapis/customsearch@npm:^3.2.0": version: 3.2.0 resolution: "@googleapis/customsearch@npm:3.2.0" @@ -3260,6 +3269,7 @@ __metadata: "@elastic/elasticsearch": "npm:^8.0.0" "@eslint/js": "npm:^9.13.0" "@eslint/markdown": "npm:^6.2.1" + "@google-cloud/vertexai": "npm:^1.9.0" "@googleapis/customsearch": "npm:^3.2.0" "@grpc/grpc-js": "npm:^1.12.2" "@grpc/proto-loader": "npm:^0.7.13" @@ -3298,6 +3308,7 @@ __metadata: eslint-plugin-unused-imports: "npm:^4.1.4" fast-xml-parser: "npm:^4.5.0" glob: "npm:^11.0.0" + google-auth-library: "npm:^9.15.0" groq-sdk: "npm:^0.7.0" header-generator: "npm:^2.1.56" husky: "npm:^9.1.6" @@ -3348,20 +3359,29 @@ __metadata: zod-to-json-schema: "npm:^3.23.3" peerDependencies: "@elastic/elasticsearch": ^8.0.0 + "@google-cloud/vertexai": "*" "@googleapis/customsearch": ^3.2.0 "@grpc/grpc-js": ^1.11.3 "@grpc/proto-loader": ^0.7.13 "@ibm-generative-ai/node-sdk": ~3.2.4 "@langchain/community": ">=0.2.28" "@langchain/core": ">=0.2.27" + google-auth-library: "*" groq-sdk: ^0.7.0 ollama: ^0.5.8 openai: ^4.67.3 openai-chat-tokens: ^0.2.8 sequelize: ^6.37.3 + dependenciesMeta: + "@google-cloud/vertexai": + optional: true + google-auth-library: + optional: true peerDependenciesMeta: "@elastic/elasticsearch": optional: true + "@google-cloud/vertexai": + optional: true "@googleapis/customsearch": optional: true "@grpc/grpc-js": @@ -3374,6 +3394,8 @@ __metadata: optional: true "@langchain/core": optional: true + google-auth-library: + optional: true groq-sdk: optional: true ollama: @@ -6199,6 +6221,20 @@ __metadata: languageName: node linkType: hard +"google-auth-library@npm:^9.1.0, google-auth-library@npm:^9.15.0": + version: 9.15.0 + resolution: "google-auth-library@npm:9.15.0" + dependencies: + base64-js: "npm:^1.3.0" + ecdsa-sig-formatter: "npm:^1.0.11" + gaxios: "npm:^6.1.1" + gcp-metadata: "npm:^6.1.0" + gtoken: "npm:^7.0.0" + jws: "npm:^4.0.0" + checksum: 10c0/f5a9a46e939147b181bac9b254f11dd8c2d05c15a65c9d3f2180252bef21c12af37d9893bc3caacafd226d6531a960535dbb5222ef869143f393c6a97639cc06 + languageName: node + linkType: hard + "google-auth-library@npm:^9.7.0": version: 9.14.1 resolution: "google-auth-library@npm:9.14.1"