Commit

feat: genai sample base (#3587)
* feat: initial base for generative-ai

* refactor: updating codeowners

add chat functions

use correct testing project

refactor: adding system tests + updating corresponding chat samples

add countTokens sample

refactor: adding in region tags, abstracting out mimetype, adding new image url

refactor: updating gs url in test, fix to args getting passed to sample functions

refactor: resolving file paths in tests, adding wait helper function

add warning about safety concerns

refactor: filling out nonStreamingChat and streamContent tests

add countTokens test

refactor: filling out more streaming tests

add safety settings test

refactor: adding in stream content and multipart content tests

feat: add new sendMultiModalPromptWithImage sample

refactor: adding region tags

update to common prompt

fix: resolve linting

* refactor: remove index file

---------

Co-authored-by: pattishin@google.com <pattishin@users.noreply.github.com>
LukeSchlangen and pattishin authored Dec 13, 2023
1 parent 73de2e8 commit dfb4fa9
Showing 23 changed files with 1,031 additions and 0 deletions.
1 change: 1 addition & 0 deletions .github/auto-label.yaml
@@ -45,6 +45,7 @@ path:
eventarc: "eventarc"
error-reporting: "clouderrorreporting"
functions: "cloudfunctions"
generative-ai: "genai"
game-servers: "gameservices"
healthcare: "healhcare"
iam: "iam"
106 changes: 106 additions & 0 deletions .github/workflows/generative-ai-snippets.yaml
@@ -0,0 +1,106 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: generative-ai-snippets
on:
  push:
    branches:
      - main
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  pull_request:
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  pull_request_target:
    types: [labeled]
    paths:
      - 'generative-ai/snippets/**'
      - '.github/workflows/generative-ai-snippets.yaml'
  schedule:
    - cron: '0 0 * * 0'
jobs:
  test:
    if: github.event.action != 'labeled' || github.event.label.name == 'actions:force-run'
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: 'read'
      id-token: 'write'
    defaults:
      run:
        working-directory: 'generative-ai/snippets'
    steps:
      - uses: actions/checkout@v4.1.0
        with:
          ref: ${{github.event.pull_request.head.sha}}
      - uses: 'google-github-actions/auth@v1.1.1'
        with:
          workload_identity_provider: 'projects/1046198160504/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions-provider'
          service_account: 'kokoro-system-test@long-door-651.iam.gserviceaccount.com'
          create_credentials_file: 'true'
          access_token_lifetime: 600s
      - id: secrets
        uses: 'google-github-actions/get-secretmanager-secrets@v1'
        with:
          secrets: |-
            caip_id:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-caip-project-id
            location:nodejs-docs-samples-tests/nodejs-docs-samples-ai-platform-location
      - uses: actions/setup-node@v4.0.0
        with:
          node-version: 16
      - name: Get npm cache directory
        id: npm-cache-dir
        shell: bash
        run: echo "dir=$(npm config get cache)" >> ${GITHUB_OUTPUT}
      - uses: actions/cache@v3
        id: npm-cache
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-
      - name: install repo dependencies
        run: npm install
        working-directory: .
      - name: install directory dependencies
        run: npm install
      - run: npm run build --if-present
      - name: set env vars for scheduled run
        if: github.event.action == 'schedule'
        run: |
          echo "MOCHA_REPORTER_SUITENAME=generative-ai-snippets" >> $GITHUB_ENV
          echo "MOCHA_REPORTER_OUTPUT=${{github.run_id}}_sponge_log.xml" >> $GITHUB_ENV
          echo "MOCHA_REPORTER=xunit" >> $GITHUB_ENV
      - run: npm test
        env:
          LOCATION: ${{ steps.secrets.outputs.location }}
          CAIP_PROJECT_ID: ${{ steps.secrets.outputs.caip_id }}
      - name: upload test results for FlakyBot workflow
        if: github.event.action == 'schedule' && always()
        uses: actions/upload-artifact@v3
        env:
          MOCHA_REPORTER_OUTPUT: "${{github.run_id}}_sponge_log.xml"
        with:
          name: test-results
          path: generative-ai/snippets/${{ env.MOCHA_REPORTER_OUTPUT }}
          retention-days: 1
  flakybot:
    permissions:
      contents: 'read'
      id-token: 'write'
    if: github.event_name == 'schedule' && always() # always() submits logs even if tests fail
    uses: ./.github/workflows/flakybot.yaml
    needs: [test]
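
For context on the env block above: the npm test step receives the Secret Manager values as the LOCATION and CAIP_PROJECT_ID environment variables. Below is a minimal sketch of how a Mocha test under generative-ai/snippets might read them; the test body is illustrative only and is not part of this commit.

// Illustrative only: reads the environment variables the workflow sets for npm test.
const assert = require('assert');

describe('CI environment wiring (illustrative)', () => {
  it('exposes the project and location to the samples under test', () => {
    const projectId = process.env.CAIP_PROJECT_ID; // from Secret Manager via the secrets step
    const location = process.env.LOCATION; // e.g. 'us-central1'
    assert.ok(projectId, 'CAIP_PROJECT_ID should be set');
    assert.ok(location, 'LOCATION should be set');
  });
});
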
1 change: 1 addition & 0 deletions .github/workflows/utils/workflows-secrets.json
@@ -5,5 +5,6 @@
"iam/deny",
"security-center/snippets",
"storagetransfer",
"generative-ai/snippets",
"vision"
]
1 change: 1 addition & 0 deletions CODEOWNERS
@@ -50,6 +50,7 @@ monitoring/opencensus @GoogleCloudPlatform/nodejs-samples-reviewers

# Data & AI
ai-platform @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
generative-ai @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
automl @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
cloud-language @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
contact-center-insights @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
52 changes: 52 additions & 0 deletions generative-ai/snippets/countTokens.js
@@ -0,0 +1,52 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const {VertexAI} = require('@google-cloud/vertexai');

async function countTokens(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // [START aiplatform_gemini_token_count]

  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';
  // const model = 'gemini-pro';

  // Initialize Vertex with your Cloud project and location
  const vertex_ai = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertex_ai.preview.getGenerativeModel({
    model: model,
  });

  const req = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const countTokensResp = await generativeModel.countTokens(req);
  console.log('count tokens response: ', countTokensResp);

  // [END aiplatform_gemini_token_count]
}

countTokens(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
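
Because countTokens reads its arguments from process.argv, the sample can be run directly, for example: node countTokens.js your-project-id us-central1 gemini-pro. The sketch below is a hypothetical follow-up showing how individual fields of the count-tokens response might be inspected; the field names totalTokens and totalBillableCharacters are assumptions based on the Vertex AI countTokens API and do not appear in this diff.

// Hypothetical follow-up to the sample above; the response field names are assumptions.
const {VertexAI} = require('@google-cloud/vertexai');

async function logTokenCount(projectId, location, model) {
  const vertexAI = new VertexAI({project: projectId, location: location});
  const generativeModel = vertexAI.preview.getGenerativeModel({model: model});

  const resp = await generativeModel.countTokens({
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  });

  // Assumed fields; log the whole object (as the sample does) if they differ.
  console.log('total tokens:', resp.totalTokens);
  console.log('billable characters:', resp.totalBillableCharacters);
}
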
73 changes: 73 additions & 0 deletions generative-ai/snippets/nonStreamingChat.js
@@ -0,0 +1,73 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const {VertexAI} = require('@google-cloud/vertexai');

Check failure on line 17 in generative-ai/snippets/nonStreamingChat.js (GitHub Actions / lint): 'wait' is defined but never used

function wait(time) {
  return new Promise(resolve => {
    setTimeout(resolve, time);
  });
}

async function createNonStreamingChat(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // TODO: Find better method. Setting delay to give api time to respond, otherwise it will 404
  // await wait(10);

  // [START aiplatform_gemini_multiturn_chat]
  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  const chat = generativeModel.startChat({});

  const chatInput1 = 'Hello';
  console.log(`User: ${chatInput1}`);

  const result1 = await chat.sendMessage(chatInput1);
  const response1 = result1.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response1);

  const chatInput2 = 'Can you tell me a scientific fun fact?';
  console.log(`User: ${chatInput2}`);
  const result2 = await chat.sendMessage(chatInput2);
  const response2 = result2.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response2);

  const chatInput3 = 'How can I learn more about that?';
  console.log(`User: ${chatInput3}`);
  const result3 = await chat.sendMessage(chatInput3);
  const response3 = result3.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', response3);

  // [END aiplatform_gemini_multiturn_chat]
}

createNonStreamingChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
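
The three chat turns above repeat the same send-and-extract pattern. The helper below is purely illustrative, not part of this commit, and is built only from calls the sample already uses.

// Illustrative helper; uses only calls that appear in the sample above.
async function sendAndPrint(chat, message) {
  console.log(`User: ${message}`);
  const result = await chat.sendMessage(message);
  const text = result.response.candidates[0].content.parts[0].text;
  console.log('Chat bot: ', text);
  return text;
}

// Possible usage inside createNonStreamingChat:
// await sendAndPrint(chat, 'Hello');
// await sendAndPrint(chat, 'Can you tell me a scientific fun fact?');
// await sendAndPrint(chat, 'How can I learn more about that?');
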
64 changes: 64 additions & 0 deletions generative-ai/snippets/nonStreamingContent.js
@@ -0,0 +1,64 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

const {VertexAI} = require('@google-cloud/vertexai');

async function createNonStreamingContent(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // [START aiplatform_gemini_function_calling]

  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  const request = {
    contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}],
  };

  console.log('Prompt:');
  console.log(request.contents[0].parts[0].text);
  console.log('Non-Streaming Response Text:');

  // Create the response stream
  const responseStream = await generativeModel.generateContentStream(request);

  // Wait for the response stream to complete
  const aggregatedResponse = await responseStream.response;

  // Select the text from the response
  const fullTextResponse =
    aggregatedResponse.candidates[0].content.parts[0].text;

  console.log(fullTextResponse);

  // [END aiplatform_gemini_function_calling]
}

createNonStreamingContent(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
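
Note that this sample calls generateContentStream but then awaits the aggregated response rather than iterating the stream. A streaming variant might look like the sketch below; the stream async iterator and the per-chunk shape are assumptions about the @google-cloud/vertexai preview API and are not shown in this diff.

// Hypothetical streaming variant of the sample above; the stream iterator and
// chunk shape are assumptions about the preview SDK, not code from this commit.
const {VertexAI} = require('@google-cloud/vertexai');

async function streamContent(projectId, location, model) {
  const vertexAI = new VertexAI({project: projectId, location: location});
  const generativeModel = vertexAI.preview.getGenerativeModel({model: model});

  const request = {
    contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}],
  };

  const responseStream = await generativeModel.generateContentStream(request);

  // Print each chunk as it arrives instead of waiting for the aggregate.
  for await (const chunk of responseStream.stream) {
    process.stdout.write(chunk.candidates[0].content.parts[0].text);
  }
}
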
(The remaining 16 changed files are not shown here.)
