diff --git a/samples/node/advanced-chat.js b/samples/node/advanced-chat.js
deleted file mode 100644
index 4c6db56d..00000000
--- a/samples/node/advanced-chat.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * @license
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {
-  genAI,
-  displayChatTokenCount,
-  streamToStdout,
-} from "./utils/common.js";
-
-async function run() {
-  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash-latest" });
-
-  const chat = model.startChat({
-    history: [
-      {
-        role: "user",
-        parts: [{text: "Hello, I have 2 dogs in my house."}],
-      },
-      {
-        role: "model",
-        parts: [{text: "Great to meet you. What would you like to know?"}],
-      },
-    ],
-    generationConfig: {
-      maxOutputTokens: 100,
-    },
-  });
-
-  const msg1 = "How many paws are in my house?";
-  displayChatTokenCount(model, chat, msg1);
-  const result1 = await chat.sendMessageStream(msg1);
-  await streamToStdout(result1.stream);
-
-  const msg2 = "How many noses (including mine)?";
-  displayChatTokenCount(model, chat, msg2);
-  const result2 = await chat.sendMessageStream(msg2);
-  await streamToStdout(result2.stream);
-
-  // Display history
-  console.log(JSON.stringify(await chat.getHistory(), null, 2));
-
-  // Display the last aggregated response
-  const response = await result2.response;
-  console.log(JSON.stringify(response, null, 2));
-}
-
-run();
diff --git a/samples/node/advanced-code-execution.js b/samples/node/advanced-code-execution.js
deleted file mode 100644
index 43de8527..00000000
--- a/samples/node/advanced-code-execution.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * @license
- * Copyright 2024 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { genAI } from "./utils/common.js";
-
-async function run() {
-  const tools = [
-    {
-      codeExecution: {},
-    },
-  ];
-
-  const model = genAI.getGenerativeModel(
-    { model: "gemini-1.5-flash-latest", tools }
-  );
-
-  const result = await model.generateContent(
-    "What are the last 4 digits of the sum of the first 70 prime numbers?",
-  );
-  const response = result.response;
-  console.log(response.text());
-}
-
-run();
diff --git a/samples/node/advanced-embeddings.js b/samples/node/advanced-embeddings.js
deleted file mode 100644
index 2c63d1b2..00000000
--- a/samples/node/advanced-embeddings.js
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * @license
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { TaskType } from "@google/generative-ai";
-import { genAI } from "./utils/common.js";
-
-const model = genAI.getGenerativeModel({ model: "embedding-001" });
-
-async function embedRetrivalQuery(queryText) {
-  const result = await model.embedContent({
-    content: { parts: [{ text: queryText }] },
-    taskType: TaskType.RETRIEVAL_QUERY,
-  });
-  const embedding = result.embedding;
-  return embedding.values;
-}
-
-async function embedRetrivalDocuments(docTexts) {
-  const result = await model.batchEmbedContents({
-    requests: docTexts.map((t) => ({
-      content: { parts: [{ text: t }] },
-      taskType: TaskType.RETRIEVAL_DOCUMENT,
-    })),
-  });
-  const embeddings = result.embeddings;
-  return embeddings.map((e, i) => ({ text: docTexts[i], values: e.values }));
-}
-
-// Returns Euclidean Distance between 2 vectors
-function euclideanDistance(a, b) {
-  let sum = 0;
-  for (let n = 0; n < a.length; n++) {
-    sum += Math.pow(a[n] - b[n], 2);
-  }
-  return Math.sqrt(sum);
-}
-
-// Performs a relevance search for queryText in relation to a known list of embeddings
-async function performQuery(queryText, docs) {
-  const queryValues = await embedRetrivalQuery(queryText);
-  console.log(queryText);
-  for (const doc of docs) {
-    console.log(
-      "  ",
-      euclideanDistance(doc.values, queryValues),
-      doc.text.substr(0, 40),
-    );
-  }
-}
-
-async function run() {
-  // Precompute embeddings for our documents
-  const docs = await embedRetrivalDocuments([
-    "The quick brown fox jumps over the lazy dog.",
-    "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
-    "Organize the world's information and make it universally accessible and useful.",
-  ]);
-
-  // Use retrieval query embeddings to find most relevant documents
-  await performQuery("Google", docs);
-  await performQuery("Placeholder text", docs);
-  await performQuery("lorem ipsum", docs);
-  await performQuery("Agile living being", docs);
-}
-
-run();
diff --git a/samples/node/cache.js b/samples/node/cache.js
new file mode 100644
index 00000000..2e991f38
--- /dev/null
+++ b/samples/node/cache.js
@@ -0,0 +1,290 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { GoogleGenerativeAI } from "@google/generative-ai";
+import {
+  GoogleAICacheManager,
+  GoogleAIFileManager,
+} from "@google/generative-ai/server";
+import { dirname } from "path";
+import { fileURLToPath } from "url";
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+const mediaPath = __dirname + "/media";
+
+async function cacheCreate() {
+  // [START cache_create]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+
+  console.log(cacheResult);
+
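+  // Build a model instance backed by the cache; its responses can draw on
+  // the cached transcript without resending it with each request.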
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const model = genAI.getGenerativeModelFromCachedContent(cacheResult);
+  const result = await model.generateContent(
+    "Please summarize this transcript.",
+  );
+  console.log(result.response.text());
+  // [END cache_create]
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function cacheCreateFromName() {
+  // [START cache_create_from_name]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+  const cacheName = cacheResult.name; // Save the name for later.
+
+  // Later
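+  // The cache stays retrievable by name until it expires. If no TTL or
+  // expiry time is set at creation, the service applies a default TTL
+  // (one hour at the time of writing).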
+  const getCacheResult = await cacheManager.get(cacheName);
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const model = genAI.getGenerativeModelFromCachedContent(getCacheResult);
+  model.generateContent("Please summarize this transcript.");
+  // [END cache_create_from_name]
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function cacheCreateFromChat() {
+  // [START cache_create_from_chat]
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash-001" });
+  const chat = model.startChat();
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  let result = await chat.sendMessage([
+    "Hi, could you summarize this transcript?",
+    {
+      fileData: {
+        fileUri: uploadResult.file.uri,
+        mimeType: uploadResult.file.mimeType,
+      },
+    },
+  ]);
+  console.log(`\n\nmodel: ${result.response.text()}`);
+  result = await chat.sendMessage(
+    "Okay, could you tell me more about the trans-lunar injection",
+  );
+  console.log(`\n\nmodel: ${result.response.text()}`);
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: await chat.getHistory(),
+  });
+
+  const newModel = genAI.getGenerativeModelFromCachedContent(cacheResult);
+
+  const newChat = newModel.startChat();
+  result = await newChat.sendMessage(
+    "I didn't understand that last part, could you explain it in simpler language?",
+  );
+  console.log(`\n\nmodel: ${result.response.text()}`);
+  // [END cache_create_from_chat]
+
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function cacheDelete() {
+  // [START cache_delete]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+  await cacheManager.delete(cacheResult.name);
+  // [END cache_delete]
+}
+
+async function cacheGet() {
+  // [START cache_get]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+  const cacheGetResult = await cacheManager.get(cacheResult.name);
+  console.log(cacheGetResult);
+  // [END cache_get]
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function cacheList() {
+  // [START cache_list]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+  console.log("My caches:");
+  const cacheListResult = await cacheManager.list();
+  for (const item of cacheListResult.cachedContents) {
+    console.log(item);
+  }
+  // [END cache_list]
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function cacheUpdate() {
+  // [START cache_update]
+  const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  const cacheResult = await cacheManager.create({
+    model: "models/gemini-1.5-flash-001",
+    contents: [
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
+      },
+    ],
+  });
+  console.log("initial cache data:", cacheResult);
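+  // Only a cache's expiration (ttlSeconds or expireTime) can be updated;
+  // its model and contents are fixed at creation.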
+  const cacheUpdateResult = await cacheManager.update(cacheResult.name, {
+    cachedContent: {
+      // 2 hours
+      ttlSeconds: 60 * 60 * 2,
+    },
+  });
+  console.log("updated cache data:", cacheUpdateResult);
+  // [END cache_update]
+  await cacheManager.delete(cacheResult.name);
+}
+
+async function runAll() {
+  // Comment out or delete any sample cases you don't want to run.
+  await cacheCreate();
+  await cacheCreateFromName();
+  await cacheCreateFromChat();
+  await cacheDelete();
+  await cacheGet();
+  await cacheList();
+  await cacheUpdate();
+}
+
+runAll();
diff --git a/samples/node/chat.js b/samples/node/chat.js
new file mode 100644
index 00000000..b3b8712d
--- /dev/null
+++ b/samples/node/chat.js
@@ -0,0 +1,111 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { GoogleGenerativeAI } from "@google/generative-ai";
+import fs from "fs";
+import { dirname } from "path";
+import { fileURLToPath } from "url";
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+const mediaPath = __dirname + "/media";
+
+async function chat() {
+  // [START chat]
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
+  const chat = model.startChat({
+    history: [
+      {
+        role: "user",
+        parts: [{ text: "Hello" }],
+      },
+      {
+        role: "model",
+        parts: [{ text: "Great to meet you. What would you like to know?" }],
+      },
+    ],
+  });
+  let result = await chat.sendMessage("I have 2 dogs in my house.");
+  console.log(result.response.text());
+  result = await chat.sendMessage("How many paws are in my house?");
+  console.log(result.response.text());
+  // [END chat]
+}
+
+async function chatStreaming() {
+  // [START chat_streaming]
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
+  const chat = model.startChat({
+    history: [
+      {
+        role: "user",
+        parts: [{ text: "Hello" }],
+      },
+      {
+        role: "model",
+        parts: [{ text: "Great to meet you. What would you like to know?" }],
+      },
+    ],
+  });
+  let result = await chat.sendMessageStream("I have 2 dogs in my house.");
+  for await (const chunk of result.stream) {
+    const chunkText = chunk.text();
+    process.stdout.write(chunkText);
+  }
+  result = await chat.sendMessageStream("How many paws are in my house?");
+  for await (const chunk of result.stream) {
+    const chunkText = chunk.text();
+    process.stdout.write(chunkText);
+  }
+  // [END chat_streaming]
+}
+
+async function chatStreamingWithImages() {
+  // [START chat_streaming_with_images]
+  const genAI = new GoogleGenerativeAI(process.env.API_KEY);
+  const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
+  const chat = model.startChat();
+
+  let result = await chat.sendMessageStream("Hello, I'm designing inventions. Can I show you one?");
+  process.stdout.write('\n\nmodel:\n');
+  for await (const chunk of result.stream) {
+    const chunkText = chunk.text();
+    process.stdout.write(chunkText);
+  }
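+  // Smaller images can be sent inline as base64-encoded data, as below;
+  // larger files are better uploaded once via the Files API.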
"models/gemini-1.5-flash-001", + contents: [ + { + role: "user", + parts: [ + { + fileData: { + fileUri: uploadResult.file.uri, + mimeType: uploadResult.file.mimeType, + }, + }, + ], + }, + ], + }); + console.log("My caches:"); + const cacheListResult = await cacheManager.list(); + for (const item of cacheListResult.cachedContents) { + console.log(item); + } + // [END cache_list] + await cacheManager.delete(cacheResult.name); +} + +async function cacheUpdate() { + // [START cache_update] + const cacheManager = new GoogleAICacheManager(process.env.API_KEY); + const fileManager = new GoogleAIFileManager(process.env.API_KEY); + + const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, { + mimeType: "text/plain", + }); + + const cacheResult = await cacheManager.create({ + model: "models/gemini-1.5-flash-001", + contents: [ + { + role: "user", + parts: [ + { + fileData: { + fileUri: uploadResult.file.uri, + mimeType: uploadResult.file.mimeType, + }, + }, + ], + }, + ], + }); + console.log("initial cache data:", cacheResult); + const cacheUpdateResult = await cacheManager.update(cacheResult.name, { + cachedContent: { + // 2 hours + ttlSeconds: 60 * 60 * 2, + }, + }); + console.log("updated cache data:", cacheUpdateResult); + // [END cache_update] + await cacheManager.delete(cacheResult.name); +} + +async function runAll() { + // Comment out or delete any sample cases you don't want to run. + await cacheCreate(); + await cacheCreateFromName(); + await cacheCreateFromChat(); + await cacheDelete(); + await cacheGet(); + await cacheList(); + await cacheUpdate(); +} + +runAll(); diff --git a/samples/node/chat.js b/samples/node/chat.js new file mode 100644 index 00000000..b3b8712d --- /dev/null +++ b/samples/node/chat.js @@ -0,0 +1,111 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { GoogleGenerativeAI } from "@google/generative-ai"; +import fs from "fs"; +import { dirname } from "path"; +import { fileURLToPath } from "url"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const mediaPath = __dirname + "/media"; + +async function chat() { + // [START chat] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); + const chat = model.startChat({ + history: [ + { + role: "user", + parts: [{ text: "Hello" }], + }, + { + role: "model", + parts: [{ text: "Great to meet you. What would you like to know?" 
}], + }, + ], + }); + let result = await chat.sendMessage("I have 2 dogs in my house."); + console.log(result.response.text()); + result = await chat.sendMessage("How many paws are in my house?"); + console.log(result.response.text()); + // [END chat] +} + +async function chatStreaming() { + // [START chat_streaming] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); + const chat = model.startChat({ + history: [ + { + role: "user", + parts: [{ text: "Hello" }], + }, + { + role: "model", + parts: [{ text: "Great to meet you. What would you like to know?" }], + }, + ], + }); + let result = await chat.sendMessageStream("I have 2 dogs in my house."); + for await (const chunk of result.stream) { + const chunkText = chunk.text(); + process.stdout.write(chunkText); + } + result = await chat.sendMessageStream("How many paws are in my house?"); + for await (const chunk of result.stream) { + const chunkText = chunk.text(); + process.stdout.write(chunkText); + } + // [END chat_streaming] +} + +async function chatStreamingWithImages() { + // [START chat_streaming_with_images] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); + const chat = model.startChat(); + + let result = await chat.sendMessageStream("Hello, I'm designing inventions. Can I show you one?"); + process.stdout.write('\n\nmodel:\n'); + for await (const chunk of result.stream) { + const chunkText = chunk.text(); + process.stdout.write(chunkText); + } + result = await chat.sendMessageStream(["What do you think about this design?", { + inlineData: { + data: Buffer.from(fs.readFileSync(`${mediaPath}/jetpack.jpg`)).toString("base64"), + mimeType: "image/jpeg", + }, + }]); + process.stdout.write('\n\nmodel:\n'); + for await (const chunk of result.stream) { + const chunkText = chunk.text(); + process.stdout.write(chunkText); + } + // [END chat_streaming_with_images] +} + +async function runAll() { + // Comment out or delete any sample cases you don't want to run. + await chat(); + await chatStreaming(); + await chatStreamingWithImages(); +} + +runAll(); diff --git a/samples/node/code_execution.js b/samples/node/code_execution.js new file mode 100644 index 00000000..cd3fdbbb --- /dev/null +++ b/samples/node/code_execution.js @@ -0,0 +1,92 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { GoogleGenerativeAI } from "@google/generative-ai"; + +async function codeExecutionBasic() { + // [START code_execution_basic] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash", + tools: [{ codeExecution: {} }], + }); + + const result = await model.generateContent( + "What is the sum of the first 50 prime numbers? 
" + + "Generate and run code for the calculation, and make sure you get " + + "all 50.", + ); + + console.log(result.response.text()); + // [END code_execution_basic] +} + +async function codeExecutionRequestOverride() { + // [START code_execution_request_override] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash", + }); + + const result = await model.generateContent({ + contents: [ + { + role: "user", + parts: [ + { + text: + "What is the sum of the first 50 prime numbers? " + + "Generate and run code for the calculation, and make sure you " + + "get all 50.", + }, + ], + }, + ], + tools: [{ codeExecution: {} }], + }); + + console.log(result.response.text()); + // [END code_execution_request_override] +} + +async function codeExecutionChat() { + // [START code_execution_chat] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash", + tools: [{ codeExecution: {} }], + }); + const chat = model.startChat(); + + const result = await chat.sendMessage( + "What is the sum of the first 50 prime numbers? " + + "Generate and run code for the calculation, and make sure you get " + + "all 50.", + ); + + console.log(result.response.text()); + // [END code_execution_chat] +} + +async function runAll() { + // Comment out or delete any sample cases you don't want to run. + await codeExecutionBasic(); + await codeExecutionRequestOverride(); + await codeExecutionChat(); +} + +runAll(); diff --git a/samples/node/content-caching.js b/samples/node/content-caching.js deleted file mode 100644 index a2dbd670..00000000 --- a/samples/node/content-caching.js +++ /dev/null @@ -1,68 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Example of uploading a content cache and referencing it in a call to - * generateContent(). - * - * NOTE: Creating and modifying content caches is a feature only available for - * use in Node. - */ - -import { GoogleAICacheManager } from "@google/generative-ai/server"; -import { genAI } from "./utils/common.js"; - -async function run() { - const cacheManager = new GoogleAICacheManager(process.env.API_KEY); - - // Generate a very long string - let longContentString = ""; - for (let i = 0; i < 32001; i++) { - longContentString += "Purple cats drink gatorade."; - longContentString += i % 8 === 7 ? "\n" : " "; - } - - const cacheResult = await cacheManager.create({ - ttlSeconds: 600, - model: "models/gemini-1.5-pro-001", - contents: [ - { - role: "user", - parts: [{ text: longContentString }], - }, - ], - }); - - const cache = await cacheManager.get(cacheResult.name); - - const model = genAI.getGenerativeModelFromCachedContent(cache); - - const result = await model.generateContent({ - contents: [ - { - role: "user", - parts: [{ text: "What do purple cats drink?" 
}], - }, - ], - }); - - const response = result.response; - const text = response.text(); - console.log(text); -} - -run(); diff --git a/samples/node/embed.js b/samples/node/embed.js new file mode 100644 index 00000000..0b39f88d --- /dev/null +++ b/samples/node/embed.js @@ -0,0 +1,62 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { GoogleGenerativeAI } from "@google/generative-ai"; + +async function embedContent() { + // [START embed_content] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ + model: "text-embedding-004", + }); + + const result = await model.embedContent("Hello world!"); + + console.log(result.embedding); + // [END embed_content] +} + +async function batchEmbedContents() { + // [START batch_embed_contents] + const genAI = new GoogleGenerativeAI(process.env.API_KEY); + const model = genAI.getGenerativeModel({ + model: "text-embedding-004", + }); + + function textToRequest(text) { + return { content: { role: "user", parts: [{ text }] } }; + } + + const result = await model.batchEmbedContents({ + requests: [ + textToRequest("What is the meaning of life?"), + textToRequest("How much wood would a woodchuck chuck?"), + textToRequest("How does the brain work?"), + ], + }); + + console.log(result.embeddings); + // [END batch_embed_contents] +} + +async function runAll() { + // Comment out or delete any sample cases you don't want to run. + await embedContent(); + await batchEmbedContents(); +} + +runAll(); diff --git a/samples/node/file-upload.js b/samples/node/file-upload.js deleted file mode 100644 index 8deccb9b..00000000 --- a/samples/node/file-upload.js +++ /dev/null @@ -1,65 +0,0 @@ -/** - * @license - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Example of uploading a file and referencing it in a call to - * generateContent(). - * - * NOTE: The Files API is only available for use in Node. - * Importing GoogleAIFileManager will crash in the - * browser. 
@@ -176,7 +170,7 @@ async function textGenMultimodalMultiImagePromptStreaming() {
   // Print text as it comes in.
   for await (const chunk of result.stream) {
     const chunkText = chunk.text();
-    console.log(chunkText);
+    process.stdout.write(chunkText);
   }
   // [END text_gen_multimodal_multi_image_prompt_streaming]
 }
 
@@ -203,9 +197,7 @@ async function textGenMultimodalAudio() {
   );
 
   const result = await model.generateContent([prompt, audioPart]);
-  const response = result.response;
-  const text = response.text();
-  console.log(text);
+  console.log(result.response.text());
   // [END text_gen_multimodal_audio]
 }
 
@@ -243,10 +235,9 @@ async function textGenMultimodalVideoPrompt() {
   };
 
   const result = await model.generateContent([prompt, videoPart]);
-  const response = result.response;
-  const text = response.text();
-  console.log(text);
+  console.log(result.response.text());
   // [END text_gen_multimodal_video_prompt]
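+  // Clean up: delete the uploaded video once it's no longer needed.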
+  await fileManager.deleteFile(uploadResult.file.name);
 }
 
 async function textGenMultimodalVideoPromptStreaming() {
@@ -282,11 +273,14 @@ async function textGenMultimodalVideoPromptStreaming() {
     },
   };
 
-  const result = await model.generateContent([prompt, videoPart]);
-  const response = result.response;
-  const text = response.text();
-  console.log(text);
+  const result = await model.generateContentStream([prompt, videoPart]);
+  // Print text as it comes in.
+  for await (const chunk of result.stream) {
+    const chunkText = chunk.text();
+    process.stdout.write(chunkText);
+  }
   // [END text_gen_multimodal_video_prompt_streaming]
+  await fileManager.deleteFile(uploadResult.file.name);
 }
 
 async function runAll() {
diff --git a/samples/node/utils/common.js b/samples/node/utils/common.js
deleted file mode 100644
index 68a115d8..00000000
--- a/samples/node/utils/common.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * @license
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { GoogleGenerativeAI } from "@google/generative-ai";
-import fs from "fs";
-
-// Get your API key from https://makersuite.google.com/app/apikey
-// Access your API key as an environment variable
-export const genAI = new GoogleGenerativeAI(process.env.API_KEY);
-
-// Converts local file information to a GoogleGenerativeAI.Part object
-export function fileToGenerativePart(path, mimeType) {
-  return {
-    inlineData: {
-      data: Buffer.from(fs.readFileSync(path)).toString("base64"),
-      mimeType,
-    },
-  };
-}
-
-// Prints chunks of generated text to the console as they become available
-export async function streamToStdout(stream) {
-  console.log("Streaming...\n");
-  for await (const chunk of stream) {
-    // Get first candidate's current text chunk
-    const chunkText = chunk.text();
-    // Print to console without adding line breaks
-    process.stdout.write(chunkText);
-  }
-  // Print blank line
-  console.log("\n");
-}
-
-export async function displayTokenCount(model, request) {
-  const { totalTokens } = await model.countTokens(request);
-  console.log("Token count: ", totalTokens);
-}
-
-export async function displayChatTokenCount(model, chat, msg) {
-  const history = await chat.getHistory();
-  const msgContent = { role: "user", parts: [{ text: msg }] };
-  await displayTokenCount(model, { contents: [...history, msgContent] });
-}