diff --git a/.env.template b/.env.template index 46ebf9d6..a3e3933f 100644 --- a/.env.template +++ b/.env.template @@ -20,6 +20,11 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false" # For Groq LLM Adapter # GROQ_API_KEY= +# For GCP VertexAI Adapter +# GOOGLE_APPLICATION_CREDENTIALS= +# GCP_VERTEXAI_PROJECT= +# GCP_VERTEXAI_LOCATION= + # Tools # CODE_INTERPRETER_URL=http://127.0.0.1:50051 @@ -30,3 +35,4 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false" # For Elasticsearch Tool # ELASTICSEARCH_NODE= # ELASTICSEARCH_API_KEY= + diff --git a/docs/llms.md b/docs/llms.md index 19f7f669..f483a528 100644 --- a/docs/llms.md +++ b/docs/llms.md @@ -21,6 +21,7 @@ To unify differences between various APIs, the framework defines a common interf | `LangChain` | ⚠️ (depends on a provider) | ⚠️ (depends on a provider) | ❌ | | `Groq` | ❌ | ✅ | ⚠️ (JSON object only) | | `AWS Bedrock` | ❌ | ✅ | ⚠️ (JSON only) - model specific | +| `VertexAI` | ✅ | ✅ | ⚠️ (JSON only) | | `BAM (Internal)` | ✅ | ⚠️ (model specific template must be provided) | ✅ | | ➕ [Request](https://github.com/i-am-bee/bee-agent-framework/discussions) | | | | diff --git a/package.json b/package.json index 2aeaa67a..fe070fa9 100644 --- a/package.json +++ b/package.json @@ -195,12 +195,14 @@ "peerDependencies": { "@aws-sdk/client-bedrock-runtime": "^3.687.0", "@elastic/elasticsearch": "^8.0.0", + "@google-cloud/vertexai": "*", "@googleapis/customsearch": "^3.2.0", "@grpc/grpc-js": "^1.11.3", "@grpc/proto-loader": "^0.7.13", "@ibm-generative-ai/node-sdk": "~3.2.4", "@langchain/community": ">=0.2.28", "@langchain/core": ">=0.2.27", + "google-auth-library": "*", "groq-sdk": "^0.7.0", "ollama": "^0.5.8", "openai": "^4.67.3", @@ -214,6 +216,9 @@ "@elastic/elasticsearch": { "optional": true }, + "@google-cloud/vertexai": { + "optional": true + }, "@googleapis/customsearch": { "optional": true }, @@ -232,6 +237,9 @@ "@langchain/core": { "optional": true }, + "google-auth-library": { + "optional": true + }, "groq-sdk": { "optional": true }, @@ -255,6 +263,7 @@ "@elastic/elasticsearch": "^8.0.0", "@eslint/js": "^9.13.0", "@eslint/markdown": "^6.2.1", + "@google-cloud/vertexai": "^1.9.0", "@googleapis/customsearch": "^3.2.0", "@grpc/grpc-js": "^1.12.2", "@grpc/proto-loader": "^0.7.13", diff --git a/src/adapters/vertexai/chat.test.ts b/src/adapters/vertexai/chat.test.ts new file mode 100644 index 00000000..d846eac4 --- /dev/null +++ b/src/adapters/vertexai/chat.test.ts @@ -0,0 +1,35 @@ +/** + * Copyright 2024 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+import { verifyDeserialization } from "@tests/e2e/utils.js";
+import { VertexAIChatLLM } from "@/adapters/vertexai/chat.js";
+
+describe("VertexAI ChatLLM", () => {
+  const getInstance = () => {
+    return new VertexAIChatLLM({
+      modelId: "gemini-1.5-flash-001",
+      location: "us-central1",
+      project: "dummy-project",
+    });
+  };
+
+  it("Serializes", async () => {
+    const instance = getInstance();
+    const serialized = instance.serialize();
+    const deserialized = VertexAIChatLLM.fromSerialized(serialized);
+    verifyDeserialization(instance, deserialized);
+  });
+});
diff --git a/src/adapters/vertexai/chat.ts b/src/adapters/vertexai/chat.ts
new file mode 100644
index 00000000..77bbf5c5
--- /dev/null
+++ b/src/adapters/vertexai/chat.ts
@@ -0,0 +1,159 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {
+  AsyncStream,
+  BaseLLMTokenizeOutput,
+  ExecutionOptions,
+  GenerateCallbacks,
+  GenerateOptions,
+  LLMCache,
+  LLMMeta,
+} from "@/llms/base.js";
+import { shallowCopy } from "@/serializer/utils.js";
+import type { GetRunContext } from "@/context.js";
+import { Emitter } from "@/emitter/emitter.js";
+import { VertexAI } from "@google-cloud/vertexai";
+import { ChatLLM, ChatLLMOutput } from "@/llms/chat.js";
+import { BaseMessage, Role } from "@/llms/primitives/message.js";
+import { signalRace } from "@/internals/helpers/promise.js";
+import { processContentResponse, registerVertexAI, createModel } from "./utils.js";
+
+export class VertexAIChatLLMOutput extends ChatLLMOutput {
+  public readonly chunks: BaseMessage[] = [];
+
+  constructor(chunk: BaseMessage) {
+    super();
+    this.chunks.push(chunk);
+  }
+
+  get messages() {
+    return this.chunks;
+  }
+
+  merge(other: VertexAIChatLLMOutput): void {
+    this.chunks.push(...other.chunks);
+  }
+
+  getTextContent(): string {
+    return this.chunks.map((result) => result.text).join("");
+  }
+
+  toString() {
+    return this.getTextContent();
+  }
+
+  createSnapshot() {
+    return { chunks: shallowCopy(this.chunks) };
+  }
+
+  loadSnapshot(snapshot: ReturnType<typeof this.createSnapshot>): void {
+    Object.assign(this, snapshot);
+  }
+}
+
+export interface VertexAIChatLLMInput {
+  modelId: string;
+  project: string;
+  location: string;
+  client?: VertexAI;
+  executionOptions?: ExecutionOptions;
+  cache?: LLMCache;
+  parameters?: Record<string, any>;
+}
+
+export class VertexAIChatLLM extends ChatLLM<VertexAIChatLLMOutput> {
+  public readonly emitter: Emitter<GenerateCallbacks> = Emitter.root.child({
+    namespace: ["vertexai", "llm"],
+    creator: this,
+  });
+
+  protected client: VertexAI;
+
+  constructor(protected readonly input: VertexAIChatLLMInput) {
+    super(input.modelId, input.executionOptions, input.cache);
+    this.client =
+      input.client ?? new VertexAI({ project: input.project, location: input.location });
+  }
+
+  static {
+    this.register();
+    registerVertexAI();
+  }
+
+  async meta(): Promise<LLMMeta> {
+    return { tokenLimit: Infinity };
+  }
+
+  async tokenize(input: BaseMessage[]): Promise<BaseLLMTokenizeOutput> {
+    const generativeModel = createModel(this.client, this.modelId);
+    const response = await generativeModel.countTokens({
+      contents: input.map((msg) => ({ parts: [{ text: msg.text }], role: msg.role })),
+    });
+    return {
+      tokensCount: response.totalTokens,
+    };
+  }
+
+  protected async _generate(
+    input: BaseMessage[],
+    options: GenerateOptions,
+    run: GetRunContext<typeof this>,
+  ): Promise<VertexAIChatLLMOutput> {
+    const generativeModel = createModel(this.client, this.modelId, options.guided?.json);
+    const response = await signalRace(
+      () =>
+        generativeModel.generateContent({
+          contents: input.map((msg) => ({ parts: [{ text: msg.text }], role: msg.role })),
+        }),
+      run.signal,
+    );
+    const result = BaseMessage.of({
+      role: Role.ASSISTANT,
+      text: processContentResponse(response.response),
+    });
+    return new VertexAIChatLLMOutput(result);
+  }
+
+  protected async *_stream(
+    input: BaseMessage[],
+    options: GenerateOptions | undefined,
+    run: GetRunContext<typeof this>,
+  ): AsyncStream<VertexAIChatLLMOutput> {
+    const generativeModel = createModel(this.client, this.modelId, options?.guided?.json);
+    const chat = generativeModel.startChat();
+    const response = await chat.sendMessageStream(input.map((msg) => msg.text));
+    for await (const chunk of response.stream) {
+      if (options?.signal?.aborted) {
+        break;
+      }
+      const result = BaseMessage.of({
+        role: Role.ASSISTANT,
+        text: processContentResponse(chunk),
+      });
+      yield new VertexAIChatLLMOutput(result);
+    }
+    run.signal.throwIfAborted();
+  }
+
+  createSnapshot() {
+    return {
+      ...super.createSnapshot(),
+      input: shallowCopy(this.input),
+      client: this.client,
+    };
+  }
+}
diff --git a/src/adapters/vertexai/llm.test.ts b/src/adapters/vertexai/llm.test.ts
new file mode 100644
index 00000000..a7bb4f35
--- /dev/null
+++ b/src/adapters/vertexai/llm.test.ts
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { verifyDeserialization } from "@tests/e2e/utils.js";
+import { VertexAILLM } from "@/adapters/vertexai/llm.js";
+
+describe("VertexAI LLM", () => {
+  const getInstance = () => {
+    return new VertexAILLM({
+      modelId: "gemini-1.5-flash-001",
+      location: "us-central1",
+      project: "dummy-project",
+    });
+  };
+
+  it("Serializes", async () => {
+    const instance = getInstance();
+    const serialized = instance.serialize();
+    const deserialized = VertexAILLM.fromSerialized(serialized);
+    verifyDeserialization(instance, deserialized);
+  });
+});
diff --git a/src/adapters/vertexai/llm.ts b/src/adapters/vertexai/llm.ts
new file mode 100644
index 00000000..a2a44ca8
--- /dev/null
+++ b/src/adapters/vertexai/llm.ts
@@ -0,0 +1,159 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { LLM, LLMInput } from "@/llms/llm.js";
+import {
+  AsyncStream,
+  BaseLLMOutput,
+  BaseLLMTokenizeOutput,
+  ExecutionOptions,
+  GenerateCallbacks,
+  GenerateOptions,
+  LLMCache,
+  LLMMeta,
+} from "@/llms/base.js";
+import { shallowCopy } from "@/serializer/utils.js";
+import type { GetRunContext } from "@/context.js";
+import { Emitter } from "@/emitter/emitter.js";
+import { VertexAI } from "@google-cloud/vertexai";
+import { Role } from "@/llms/primitives/message.js";
+import { signalRace } from "@/internals/helpers/promise.js";
+import { processContentResponse, getTokenCount, registerVertexAI, createModel } from "./utils.js";
+
+interface VertexAILLMChunk {
+  text: string;
+  metadata: Record<string, any>;
+}
+
+export class VertexAILLMOutput extends BaseLLMOutput {
+  public readonly chunks: VertexAILLMChunk[] = [];
+
+  constructor(chunk: VertexAILLMChunk) {
+    super();
+    this.chunks.push(chunk);
+  }
+
+  merge(other: VertexAILLMOutput): void {
+    this.chunks.push(...other.chunks);
+  }
+
+  getTextContent(): string {
+    return this.chunks.map((result) => result.text).join("");
+  }
+
+  toString(): string {
+    return this.getTextContent();
+  }
+
+  createSnapshot() {
+    return { chunks: shallowCopy(this.chunks) };
+  }
+
+  loadSnapshot(snapshot: ReturnType<typeof this.createSnapshot>): void {
+    Object.assign(this, snapshot);
+  }
+}
+
+export interface VertexAILLMInput {
+  modelId: string;
+  project: string;
+  location: string;
+  client?: VertexAI;
+  executionOptions?: ExecutionOptions;
+  cache?: LLMCache;
+  parameters?: Record<string, any>;
+}
+
+export class VertexAILLM extends LLM<VertexAILLMOutput> {
+  public readonly emitter: Emitter<GenerateCallbacks> = Emitter.root.child({
+    namespace: ["vertexai", "llm"],
+    creator: this,
+  });
+
+  protected client: VertexAI;
+
+  constructor(protected readonly input: VertexAILLMInput) {
+    super(input.modelId, input.executionOptions, input.cache);
+    this.client =
+      input.client ?? new VertexAI({ project: input.project, location: input.location });
+  }
+
+  static {
+    this.register();
+    registerVertexAI();
+  }
+
+  async meta(): Promise<LLMMeta> {
+    return { tokenLimit: Infinity };
+  }
+
+  async tokenize(input: LLMInput): Promise<BaseLLMTokenizeOutput> {
+    const generativeModel = createModel(this.client, this.modelId);
+    const response = await generativeModel.countTokens({
+      contents: [{ parts: [{ text: input }], role: Role.USER }],
+    });
+    return {
+      tokensCount: response.totalTokens,
+    };
+  }
+
+  protected async _generate(
+    input: LLMInput,
+    options: GenerateOptions,
+    run: GetRunContext<typeof this>,
+  ): Promise<VertexAILLMOutput> {
+    const generativeModel = createModel(this.client, this.modelId, options.guided?.json);
+    const responses = await signalRace(() => generativeModel.generateContent(input), run.signal);
+    const result: VertexAILLMChunk = {
+      text: processContentResponse(responses.response),
+      metadata: { tokenCount: getTokenCount(responses.response) },
+    };
+    return new VertexAILLMOutput(result);
+  }
+
+  protected async *_stream(
+    input: LLMInput,
+    options: GenerateOptions | undefined,
+    run: GetRunContext<typeof this>,
+  ): AsyncStream<VertexAILLMOutput> {
+    const generativeModel = createModel(this.client, this.modelId, options?.guided?.json);
+    const response = await generativeModel.generateContentStream(input);
+    for await (const chunk of response.stream) {
+      if (options?.signal?.aborted) {
+        break;
+      }
+      const result: VertexAILLMChunk = {
+        text: processContentResponse(chunk),
+        metadata: { tokenCount: getTokenCount(chunk) },
+      };
+      yield new VertexAILLMOutput(result);
+    }
+    run.signal.throwIfAborted();
+  }
+
+  createSnapshot() {
+    return {
+      ...super.createSnapshot(),
+      input: shallowCopy(this.input),
+      client: this.client,
+    };
+  }
+
+  loadSnapshot({ input, ...snapshot }: ReturnType<typeof this.createSnapshot>) {
+    super.loadSnapshot(snapshot);
+    Object.assign(this, { input });
+  }
+}
diff --git a/src/adapters/vertexai/utils.ts b/src/adapters/vertexai/utils.ts
new file mode 100644
index 00000000..7f482485
--- /dev/null
+++ b/src/adapters/vertexai/utils.ts
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { isString } from "remeda";
+import { Serializer } from "@/serializer/serializer.js";
+import { VertexAI, GenerativeModel, ModelParams } from "@google-cloud/vertexai";
+import { getPropStrict } from "@/internals/helpers/object.js";
+import { GenerateContentResponse } from "@google-cloud/vertexai";
+
+export function processContentResponse(response: GenerateContentResponse): string {
+  return (
+    response.candidates
+      ?.flatMap((candidate) =>
+        candidate.content.parts.filter((part) => part.text).map((part) => part.text!),
+      )
+      .join("") || "Empty"
+  );
+}
+
+export function getTokenCount(response: GenerateContentResponse): number {
+  return response.usageMetadata?.totalTokenCount ?? Infinity;
+}
+
+export function registerVertexAI() {
+  Serializer.register(VertexAI, {
+    toPlain: (value) => ({
+      project: getPropStrict(value, "project"),
+      location: getPropStrict(value, "location"),
+    }),
+    fromPlain: (value) => {
+      return new VertexAI({ project: value.project, location: value.location });
+    },
+  });
+}
+
+export function createModel(
+  client: VertexAI,
+  modelId: string,
+  schema?: string | Record<string, any>,
+): GenerativeModel {
+  const modelParams: ModelParams = { model: modelId };
+  if (schema) {
+    const schemaJson = isString(schema) ? JSON.parse(schema) : schema;
+    const generationConfig = { responseSchema: schemaJson, responseMimeType: "application/json" };
+    modelParams.generationConfig = generationConfig;
+  }
+  return client.getGenerativeModel(modelParams);
+}
diff --git a/tests/e2e/adapters/vertexai/chat.test.ts b/tests/e2e/adapters/vertexai/chat.test.ts
new file mode 100644
index 00000000..9f2dd982
--- /dev/null
+++ b/tests/e2e/adapters/vertexai/chat.test.ts
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { BaseMessage } from "@/llms/primitives/message.js";
+import { expect } from "vitest";
+import { VertexAIChatLLM } from "@/adapters/vertexai/chat.js";
+import { getEnv } from "@/internals/env.js";
+
+const project = getEnv("GCP_VERTEXAI_PROJECT");
+const location = getEnv("GCP_VERTEXAI_LOCATION");
+
+describe.runIf(Boolean(project && location && getEnv("GOOGLE_APPLICATION_CREDENTIALS")))(
+  "GCP Vertex AI",
+  () => {
+    const createChatLLM = () => {
+      return new VertexAIChatLLM({
+        modelId: "gemini-1.5-flash-001",
+        project: project,
+        location: location,
+      });
+    };
+
+    it("Generates", async () => {
+      const conversation = [
+        BaseMessage.of({
+          role: "user",
+          text: `You are a helpful, respectful, and honest assistant. Your answer should be short and concise.`,
+        }),
+      ];
+      const llm = createChatLLM();
+
+      for (const { question, answer } of [
+        { question: `What is the coldest continent?`, answer: "arctica" },
+        { question: "What is the most common typical animal that lives there?", answer: "penguin" },
+      ]) {
+        conversation.push(
+          BaseMessage.of({
+            role: "user",
+            text: question,
+          }),
+        );
+        const response = await llm.generate(conversation);
+
+        const newMessages = response.messages;
+        expect(newMessages).toHaveLength(1);
+        expect(newMessages[0].text.toLowerCase()).toContain(answer.toLowerCase());
+        conversation.push(...newMessages);
+      }
+    });
+  },
+);
diff --git a/tests/e2e/adapters/vertexai/llm.test.ts b/tests/e2e/adapters/vertexai/llm.test.ts
new file mode 100644
index 00000000..e7d6acd5
--- /dev/null
+++ b/tests/e2e/adapters/vertexai/llm.test.ts
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2024 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { VertexAILLM, VertexAILLMOutput } from "@/adapters/vertexai/llm.js"; +import { getEnv } from "@/internals/env.js"; + +const project = getEnv("GCP_VERTEXAI_PROJECT"); +const location = getEnv("GCP_VERTEXAI_LOCATION"); + +describe.runIf(Boolean(project && location && getEnv("GOOGLE_APPLICATION_CREDENTIALS")))( + "GCP Vertex AI", + () => { + const createLLM = () => { + return new VertexAILLM({ + modelId: "gemini-1.5-flash-001", + project: project, + location: location, + }); + }; + + it("Meta", async () => { + const llm = createLLM(); + const response = await llm.meta(); + expect(response.tokenLimit).toBeGreaterThan(0); + }); + + it("Generates", async () => { + const llm = createLLM(); + const response = await llm.generate("Hello world!"); + expect(response).toBeInstanceOf(VertexAILLMOutput); + }); + + it("Streams", async () => { + const llm = createLLM(); + for await (const chunk of llm.stream("Hello world!")) { + expect(chunk).toBeInstanceOf(VertexAILLMOutput); + expect(chunk.toString()).toBeTruthy(); + } + }); + }, +); diff --git a/tests/e2e/utils.ts b/tests/e2e/utils.ts index 663ee2ce..da69121e 100644 --- a/tests/e2e/utils.ts +++ b/tests/e2e/utils.ts @@ -33,6 +33,7 @@ import { customsearch_v1 } from "@googleapis/customsearch"; import { LangChainTool } from "@/adapters/langchain/tools.js"; import { Client as esClient } from "@elastic/elasticsearch"; import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; +import { VertexAI } from "@google-cloud/vertexai"; interface CallbackOptions { required?: boolean; @@ -131,6 +132,7 @@ verifyDeserialization.ignoredClasses = [ Emitter, esClient, BedrockRuntimeClient, + VertexAI, ] as ClassConstructor[]; verifyDeserialization.isIgnored = (key: string, value: unknown, parent?: any) => { if (verifyDeserialization.ignoredKeys.has(key)) { diff --git a/yarn.lock b/yarn.lock index 5c52cda2..334fb92b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1313,6 +1313,15 @@ __metadata: languageName: node linkType: hard +"@google-cloud/vertexai@npm:^1.9.0": + version: 1.9.0 + resolution: "@google-cloud/vertexai@npm:1.9.0" + dependencies: + google-auth-library: "npm:^9.1.0" + checksum: 10c0/6568fa105a180b4d27be31be92e6c71b9f8d018bb432631d63e0bb37cc299811895dbf237b9c560a1558de4f87b18882bdbb8e976ce43bf75bf80d998f395d76 + languageName: node + linkType: hard + "@googleapis/customsearch@npm:^3.2.0": version: 3.2.0 resolution: "@googleapis/customsearch@npm:3.2.0" @@ -4323,6 +4332,7 @@ __metadata: "@elastic/elasticsearch": "npm:^8.0.0" "@eslint/js": "npm:^9.13.0" "@eslint/markdown": "npm:^6.2.1" + "@google-cloud/vertexai": "npm:^1.9.0" "@googleapis/customsearch": "npm:^3.2.0" "@grpc/grpc-js": "npm:^1.12.2" "@grpc/proto-loader": "npm:^0.7.13" @@ -4412,12 +4422,14 @@ __metadata: peerDependencies: "@aws-sdk/client-bedrock-runtime": ^3.687.0 "@elastic/elasticsearch": ^8.0.0 + "@google-cloud/vertexai": "*" "@googleapis/customsearch": ^3.2.0 "@grpc/grpc-js": ^1.11.3 "@grpc/proto-loader": ^0.7.13 "@ibm-generative-ai/node-sdk": ~3.2.4 "@langchain/community": ">=0.2.28" "@langchain/core": ">=0.2.27" + google-auth-library: "*" groq-sdk: ^0.7.0 
ollama: ^0.5.8 openai: ^4.67.3 @@ -4428,6 +4440,8 @@ __metadata: optional: true "@elastic/elasticsearch": optional: true + "@google-cloud/vertexai": + optional: true "@googleapis/customsearch": optional: true "@grpc/grpc-js": @@ -4440,6 +4454,8 @@ __metadata: optional: true "@langchain/core": optional: true + google-auth-library: + optional: true groq-sdk: optional: true ollama: @@ -7283,6 +7299,20 @@ __metadata: languageName: node linkType: hard +"google-auth-library@npm:^9.1.0": + version: 9.15.0 + resolution: "google-auth-library@npm:9.15.0" + dependencies: + base64-js: "npm:^1.3.0" + ecdsa-sig-formatter: "npm:^1.0.11" + gaxios: "npm:^6.1.1" + gcp-metadata: "npm:^6.1.0" + gtoken: "npm:^7.0.0" + jws: "npm:^4.0.0" + checksum: 10c0/f5a9a46e939147b181bac9b254f11dd8c2d05c15a65c9d3f2180252bef21c12af37d9893bc3caacafd226d6531a960535dbb5222ef869143f393c6a97639cc06 + languageName: node + linkType: hard + "google-auth-library@npm:^9.7.0": version: 9.14.1 resolution: "google-auth-library@npm:9.14.1"
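
---

Usage sketch (reviewer note, not part of the diff): a minimal example of how the new adapter is expected to be wired up, assuming the package exposes it under the usual `bee-agent-framework/adapters/...` subpath exports and that the `GOOGLE_APPLICATION_CREDENTIALS`, `GCP_VERTEXAI_PROJECT`, and `GCP_VERTEXAI_LOCATION` variables from `.env.template` are set. The model id and prompts are illustrative.

```ts
import { VertexAILLM } from "bee-agent-framework/adapters/vertexai/llm";
import { VertexAIChatLLM } from "bee-agent-framework/adapters/vertexai/chat";
import { BaseMessage } from "bee-agent-framework/llms/primitives/message";

// Credentials are resolved by google-auth-library via GOOGLE_APPLICATION_CREDENTIALS;
// project/location map straight to the VertexAI client constructor in the adapter.
const llm = new VertexAILLM({
  modelId: "gemini-1.5-flash-001",
  project: process.env.GCP_VERTEXAI_PROJECT!,
  location: process.env.GCP_VERTEXAI_LOCATION!,
});

// Plain completion.
const completion = await llm.generate("What is the capital of France?");
console.log(completion.getTextContent());

// Streaming: each chunk is a VertexAILLMOutput carrying text plus token metadata.
for await (const chunk of llm.stream("Count from one to five.")) {
  process.stdout.write(chunk.getTextContent());
}

// Constrained JSON output: `guided.json` is forwarded to createModel, which sets
// responseSchema and responseMimeType on the model's generation config.
const structured = await llm.generate("Return a JSON object describing Paris.", {
  guided: { json: { type: "object", properties: { city: { type: "string" } } } },
});
console.log(structured.getTextContent());

// Chat variant.
const chat = new VertexAIChatLLM({
  modelId: "gemini-1.5-flash-001",
  project: process.env.GCP_VERTEXAI_PROJECT!,
  location: process.env.GCP_VERTEXAI_LOCATION!,
});
const reply = await chat.generate([BaseMessage.of({ role: "user", text: "Hello!" })]);
console.log(reply.messages[0].text);
```

Note that `meta()` reports `tokenLimit: Infinity` because the SDK does not expose per-model context limits, so callers should not rely on it for truncation decisions.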