From a50bfc3c67b76296c9929c3e5e8b6d84465b257b Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 24 Jun 2022 09:53:57 +0000 Subject: [PATCH 1/2] feat: support regapic LRO Use gapic-generator-typescript v2.15.1. PiperOrigin-RevId: 456946341 Source-Link: https://github.com/googleapis/googleapis/commit/88fd18d9d3b872b3d06a3d9392879f50b5bf3ce5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/accfa371f667439313335c64042b063c1c53102e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYWNjZmEzNzFmNjY3NDM5MzEzMzM1YzY0MDQyYjA2M2MxYzUzMTAyZSJ9 --- owl-bot-staging/v1/.eslintignore | 7 + owl-bot-staging/v1/.eslintrc.json | 3 + owl-bot-staging/v1/.gitignore | 14 + owl-bot-staging/v1/.jsdoc.js | 55 + owl-bot-staging/v1/.mocharc.js | 33 + owl-bot-staging/v1/.prettierrc.js | 22 + owl-bot-staging/v1/README.md | 1 + owl-bot-staging/v1/linkinator.config.json | 16 + owl-bot-staging/v1/package.json | 64 + .../v1/video_intelligence.proto | 906 ++++++++++++++ ...ata.google.cloud.videointelligence.v1.json | 75 ++ ...deo_intelligence_service.annotate_video.js | 100 ++ owl-bot-staging/v1/src/index.ts | 25 + owl-bot-staging/v1/src/v1/gapic_metadata.json | 33 + owl-bot-staging/v1/src/v1/index.ts | 19 + .../v1/video_intelligence_service_client.ts | 443 +++++++ ...eo_intelligence_service_client_config.json | 40 + ...video_intelligence_service_proto_list.json | 3 + .../system-test/fixtures/sample/src/index.js | 27 + .../system-test/fixtures/sample/src/index.ts | 32 + owl-bot-staging/v1/system-test/install.ts | 49 + .../gapic_video_intelligence_service_v1.ts | 259 ++++ owl-bot-staging/v1/tsconfig.json | 19 + owl-bot-staging/v1/webpack.config.js | 64 + owl-bot-staging/v1beta2/.eslintignore | 7 + owl-bot-staging/v1beta2/.eslintrc.json | 3 + owl-bot-staging/v1beta2/.gitignore | 14 + owl-bot-staging/v1beta2/.jsdoc.js | 55 + owl-bot-staging/v1beta2/.mocharc.js | 33 + owl-bot-staging/v1beta2/.prettierrc.js | 22 + owl-bot-staging/v1beta2/README.md | 1 + 
.../v1beta2/linkinator.config.json | 16 + owl-bot-staging/v1beta2/package.json | 64 + .../v1beta2/video_intelligence.proto | 410 +++++++ ...oogle.cloud.videointelligence.v1beta2.json | 75 ++ ...deo_intelligence_service.annotate_video.js | 99 ++ owl-bot-staging/v1beta2/src/index.ts | 25 + .../v1beta2/src/v1beta2/gapic_metadata.json | 33 + owl-bot-staging/v1beta2/src/v1beta2/index.ts | 19 + .../video_intelligence_service_client.ts | 442 +++++++ ...eo_intelligence_service_client_config.json | 40 + ...video_intelligence_service_proto_list.json | 3 + .../system-test/fixtures/sample/src/index.js | 27 + .../system-test/fixtures/sample/src/index.ts | 32 + .../v1beta2/system-test/install.ts | 49 + ...apic_video_intelligence_service_v1beta2.ts | 259 ++++ owl-bot-staging/v1beta2/tsconfig.json | 19 + owl-bot-staging/v1beta2/webpack.config.js | 64 + owl-bot-staging/v1p1beta1/.eslintignore | 7 + owl-bot-staging/v1p1beta1/.eslintrc.json | 3 + owl-bot-staging/v1p1beta1/.gitignore | 14 + owl-bot-staging/v1p1beta1/.jsdoc.js | 55 + owl-bot-staging/v1p1beta1/.mocharc.js | 33 + owl-bot-staging/v1p1beta1/.prettierrc.js | 22 + owl-bot-staging/v1p1beta1/README.md | 1 + .../v1p1beta1/linkinator.config.json | 16 + owl-bot-staging/v1p1beta1/package.json | 64 + .../v1p1beta1/video_intelligence.proto | 450 +++++++ ...gle.cloud.videointelligence.v1p1beta1.json | 75 ++ ...deo_intelligence_service.annotate_video.js | 99 ++ owl-bot-staging/v1p1beta1/src/index.ts | 25 + .../src/v1p1beta1/gapic_metadata.json | 33 + .../v1p1beta1/src/v1p1beta1/index.ts | 19 + .../video_intelligence_service_client.ts | 442 +++++++ ...eo_intelligence_service_client_config.json | 40 + ...video_intelligence_service_proto_list.json | 3 + .../system-test/fixtures/sample/src/index.js | 27 + .../system-test/fixtures/sample/src/index.ts | 32 + .../v1p1beta1/system-test/install.ts | 49 + ...ic_video_intelligence_service_v1p1beta1.ts | 259 ++++ owl-bot-staging/v1p1beta1/tsconfig.json | 19 + 
owl-bot-staging/v1p1beta1/webpack.config.js | 64 + owl-bot-staging/v1p2beta1/.eslintignore | 7 + owl-bot-staging/v1p2beta1/.eslintrc.json | 3 + owl-bot-staging/v1p2beta1/.gitignore | 14 + owl-bot-staging/v1p2beta1/.jsdoc.js | 55 + owl-bot-staging/v1p2beta1/.mocharc.js | 33 + owl-bot-staging/v1p2beta1/.prettierrc.js | 22 + owl-bot-staging/v1p2beta1/README.md | 1 + .../v1p2beta1/linkinator.config.json | 16 + owl-bot-staging/v1p2beta1/package.json | 64 + .../v1p2beta1/video_intelligence.proto | 489 ++++++++ ...gle.cloud.videointelligence.v1p2beta1.json | 75 ++ ...deo_intelligence_service.annotate_video.js | 97 ++ owl-bot-staging/v1p2beta1/src/index.ts | 25 + .../src/v1p2beta1/gapic_metadata.json | 33 + .../v1p2beta1/src/v1p2beta1/index.ts | 19 + .../video_intelligence_service_client.ts | 440 +++++++ ...eo_intelligence_service_client_config.json | 40 + ...video_intelligence_service_proto_list.json | 3 + .../system-test/fixtures/sample/src/index.js | 27 + .../system-test/fixtures/sample/src/index.ts | 32 + .../v1p2beta1/system-test/install.ts | 49 + ...ic_video_intelligence_service_v1p2beta1.ts | 259 ++++ owl-bot-staging/v1p2beta1/tsconfig.json | 19 + owl-bot-staging/v1p2beta1/webpack.config.js | 64 + owl-bot-staging/v1p3beta1/.eslintignore | 7 + owl-bot-staging/v1p3beta1/.eslintrc.json | 3 + owl-bot-staging/v1p3beta1/.gitignore | 14 + owl-bot-staging/v1p3beta1/.jsdoc.js | 55 + owl-bot-staging/v1p3beta1/.mocharc.js | 33 + owl-bot-staging/v1p3beta1/.prettierrc.js | 22 + owl-bot-staging/v1p3beta1/README.md | 1 + .../v1p3beta1/linkinator.config.json | 16 + owl-bot-staging/v1p3beta1/package.json | 65 + .../v1p3beta1/video_intelligence.proto | 1090 +++++++++++++++++ ...gle.cloud.videointelligence.v1p3beta1.json | 119 ++ ...igence_service.streaming_annotate_video.js | 73 ++ ...deo_intelligence_service.annotate_video.js | 100 ++ owl-bot-staging/v1p3beta1/src/index.ts | 27 + .../src/v1p3beta1/gapic_metadata.json | 51 + .../v1p3beta1/src/v1p3beta1/index.ts | 20 + 
...aming_video_intelligence_service_client.ts | 331 +++++ ...eo_intelligence_service_client_config.json | 31 + ...video_intelligence_service_proto_list.json | 3 + .../video_intelligence_service_client.ts | 443 +++++++ ...eo_intelligence_service_client_config.json | 40 + ...video_intelligence_service_proto_list.json | 3 + .../system-test/fixtures/sample/src/index.js | 28 + .../system-test/fixtures/sample/src/index.ts | 38 + .../v1p3beta1/system-test/install.ts | 49 + ...ng_video_intelligence_service_v1p3beta1.ts | 195 +++ ...ic_video_intelligence_service_v1p3beta1.ts | 259 ++++ owl-bot-staging/v1p3beta1/tsconfig.json | 19 + owl-bot-staging/v1p3beta1/webpack.config.js | 64 + 125 files changed, 11056 insertions(+) create mode 100644 owl-bot-staging/v1/.eslintignore create mode 100644 owl-bot-staging/v1/.eslintrc.json create mode 100644 owl-bot-staging/v1/.gitignore create mode 100644 owl-bot-staging/v1/.jsdoc.js create mode 100644 owl-bot-staging/v1/.mocharc.js create mode 100644 owl-bot-staging/v1/.prettierrc.js create mode 100644 owl-bot-staging/v1/README.md create mode 100644 owl-bot-staging/v1/linkinator.config.json create mode 100644 owl-bot-staging/v1/package.json create mode 100644 owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto create mode 100644 owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json create mode 100644 owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js create mode 100644 owl-bot-staging/v1/src/index.ts create mode 100644 owl-bot-staging/v1/src/v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/src/v1/index.ts create mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json create mode 100644 
owl-bot-staging/v1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1/system-test/install.ts create mode 100644 owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts create mode 100644 owl-bot-staging/v1/tsconfig.json create mode 100644 owl-bot-staging/v1/webpack.config.js create mode 100644 owl-bot-staging/v1beta2/.eslintignore create mode 100644 owl-bot-staging/v1beta2/.eslintrc.json create mode 100644 owl-bot-staging/v1beta2/.gitignore create mode 100644 owl-bot-staging/v1beta2/.jsdoc.js create mode 100644 owl-bot-staging/v1beta2/.mocharc.js create mode 100644 owl-bot-staging/v1beta2/.prettierrc.js create mode 100644 owl-bot-staging/v1beta2/README.md create mode 100644 owl-bot-staging/v1beta2/linkinator.config.json create mode 100644 owl-bot-staging/v1beta2/package.json create mode 100644 owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto create mode 100644 owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json create mode 100644 owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js create mode 100644 owl-bot-staging/v1beta2/src/index.ts create mode 100644 owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta2/src/v1beta2/index.ts create mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json create mode 100644 owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1beta2/system-test/install.ts create mode 100644 
owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts create mode 100644 owl-bot-staging/v1beta2/tsconfig.json create mode 100644 owl-bot-staging/v1beta2/webpack.config.js create mode 100644 owl-bot-staging/v1p1beta1/.eslintignore create mode 100644 owl-bot-staging/v1p1beta1/.eslintrc.json create mode 100644 owl-bot-staging/v1p1beta1/.gitignore create mode 100644 owl-bot-staging/v1p1beta1/.jsdoc.js create mode 100644 owl-bot-staging/v1p1beta1/.mocharc.js create mode 100644 owl-bot-staging/v1p1beta1/.prettierrc.js create mode 100644 owl-bot-staging/v1p1beta1/README.md create mode 100644 owl-bot-staging/v1p1beta1/linkinator.config.json create mode 100644 owl-bot-staging/v1p1beta1/package.json create mode 100644 owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto create mode 100644 owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json create mode 100644 owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js create mode 100644 owl-bot-staging/v1p1beta1/src/index.ts create mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts create mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json create mode 100644 owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1p1beta1/system-test/install.ts create mode 100644 owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts create mode 100644 owl-bot-staging/v1p1beta1/tsconfig.json 
create mode 100644 owl-bot-staging/v1p1beta1/webpack.config.js create mode 100644 owl-bot-staging/v1p2beta1/.eslintignore create mode 100644 owl-bot-staging/v1p2beta1/.eslintrc.json create mode 100644 owl-bot-staging/v1p2beta1/.gitignore create mode 100644 owl-bot-staging/v1p2beta1/.jsdoc.js create mode 100644 owl-bot-staging/v1p2beta1/.mocharc.js create mode 100644 owl-bot-staging/v1p2beta1/.prettierrc.js create mode 100644 owl-bot-staging/v1p2beta1/README.md create mode 100644 owl-bot-staging/v1p2beta1/linkinator.config.json create mode 100644 owl-bot-staging/v1p2beta1/package.json create mode 100644 owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto create mode 100644 owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json create mode 100644 owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js create mode 100644 owl-bot-staging/v1p2beta1/src/index.ts create mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts create mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json create mode 100644 owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts create mode 100644 owl-bot-staging/v1p2beta1/system-test/install.ts create mode 100644 owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts create mode 100644 owl-bot-staging/v1p2beta1/tsconfig.json create mode 100644 owl-bot-staging/v1p2beta1/webpack.config.js create mode 100644 owl-bot-staging/v1p3beta1/.eslintignore create mode 
100644 owl-bot-staging/v1p3beta1/.eslintrc.json create mode 100644 owl-bot-staging/v1p3beta1/.gitignore create mode 100644 owl-bot-staging/v1p3beta1/.jsdoc.js create mode 100644 owl-bot-staging/v1p3beta1/.mocharc.js create mode 100644 owl-bot-staging/v1p3beta1/.prettierrc.js create mode 100644 owl-bot-staging/v1p3beta1/README.md create mode 100644 owl-bot-staging/v1p3beta1/linkinator.config.json create mode 100644 owl-bot-staging/v1p3beta1/package.json create mode 100644 owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto create mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json create mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js create mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js create mode 100644 owl-bot-staging/v1p3beta1/src/index.ts create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json create mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json create mode 100644 owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js create mode 100644 owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts create mode 100644 
owl-bot-staging/v1p3beta1/system-test/install.ts create mode 100644 owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts create mode 100644 owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts create mode 100644 owl-bot-staging/v1p3beta1/tsconfig.json create mode 100644 owl-bot-staging/v1p3beta1/webpack.config.js diff --git a/owl-bot-staging/v1/.eslintignore b/owl-bot-staging/v1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1/.eslintrc.json b/owl-bot-staging/v1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1/.gitignore b/owl-bot-staging/v1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1/.jsdoc.js b/owl-bot-staging/v1/.jsdoc.js new file mode 100644 index 00000000..6c816e68 --- /dev/null +++ b/owl-bot-staging/v1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/video-intelligence', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1/.mocharc.js b/owl-bot-staging/v1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1/.prettierrc.js b/owl-bot-staging/v1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1/README.md b/owl-bot-staging/v1/README.md new file mode 100644 index 00000000..d1c53e8c --- /dev/null +++ b/owl-bot-staging/v1/README.md @@ -0,0 +1 @@ +Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1/linkinator.config.json b/owl-bot-staging/v1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1/package.json b/owl-bot-staging/v1/package.json new file mode 100644 index 00000000..6b17fa2c --- /dev/null +++ b/owl-bot-staging/v1/package.json @@ -0,0 +1,64 @@ +{ + "name": "@google-cloud/video-intelligence", + "version": "0.1.0", + "description": "Videointelligence client for Node.js", + "repository": "googleapis/nodejs-videointelligence", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google videointelligence", + "videointelligence", + "video intelligence service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.1.1" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto b/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto new file mode 100644 index 00000000..648ec475 --- /dev/null +++ b/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto @@ -0,0 +1,906 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.videointelligence.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1"; + +// Service that implements the Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. 
URIs must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` must be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. These must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. 
The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; + + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection. + FACE_DETECTION = 4; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; + + // Logo detection, tracking, and recognition. + LOGO_RECOGNITION = 12; + + // Person detection. + PERSON_DETECTION = 14; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. 
+enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e., non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; + + // The confidence threshold we perform filtering on the labels from + // frame-level detection. If not set, it is set to 0.4 by default. The valid + // range for this threshold is [0.1, 0.9]. Any value set outside of this + // range will be clipped. + // Note: For best results, follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float frame_confidence_threshold = 4; + + // The confidence threshold we perform filtering on the labels from + // video-level and shot-level detections. If not set, it's set to 0.3 by + // default. The valid range for this threshold is [0.1, 0.9]. Any value set + // outside of this range will be clipped. + // Note: For best results, follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float video_confidence_threshold = 5; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. 
+ // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes are included in the face annotation output. + bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes are included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is set to false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as cloth color (black, + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, + // etc. + // Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 3; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for TEXT_DETECTION. +message TextDetectionConfig { + // Language hint can be specified if the language to be detected is known a + // priori. It can increase the accuracy of the detection. Language hint must + // be language code in BCP-47 format. 
+ // + // Automatic language detection is performed if no hint is provided. + repeated string language_hints = 1; + + // Model to use for text detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 2; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g., `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // For example, when the label is `Terrier`, the category is likely `dog`. And + // in some cases there might be more than one categories e.g., `Terrier` could + // also be a `pet`. 
+ repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; + + // Feature version. + string version = 5; +} + +// Video frame level annotation results for explicit content. +message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content.. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; + + // Feature version. + string version = 2; +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +message NormalizedBoundingBox { + // Left X coordinate. + float left = 1; + + // Top Y coordinate. + float top = 2; + + // Right X coordinate. + float right = 3; + + // Bottom Y coordinate. + float bottom = 4; +} + +// Face detection annotation. +message FaceDetectionAnnotation { + // The face tracks with attributes. + repeated Track tracks = 3; + + // The thumbnail of a person's face. + bytes thumbnail = 4; + + // Feature version. + string version = 5; +} + +// Person detection annotation per video. +message PersonDetectionAnnotation { + // The detected tracks of a person. + repeated Track tracks = 1; + + // Feature version. + string version = 2; +} + +// Video segment level annotation results for face detection. +message FaceSegment { + // Video segment where a face was detected. + VideoSegment segment = 1; +} + +// Deprecated. No effect. 
+message FaceFrame { + option deprecated = true; + + // Normalized Bounding boxes in a frame. + // There can be more than one boxes if the same face is detected in multiple + // locations within the current frame. + repeated NormalizedBoundingBox normalized_bounding_boxes = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + google.protobuf.Duration time_offset = 2; +} + +// Deprecated. No effect. +message FaceAnnotation { + option deprecated = true; + + // Thumbnail of a representative face view (in JPEG format). + bytes thumbnail = 1; + + // All video segments where a face was detected. + repeated FaceSegment segments = 2; + + // All video frames where a face was detected. + repeated FaceFrame frames = 3; +} + +// For tracking related features. +// An object at time_offset with attributes, and located with +// normalized_bounding_box. +message TimestampedObject { + // Normalized Bounding box in a frame, where the object is located. + NormalizedBoundingBox normalized_bounding_box = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this object. + google.protobuf.Duration time_offset = 2; + + // Optional. The attributes of the object in the bounding box. + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The detected landmarks. + repeated DetectedLandmark landmarks = 4 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A track of an object instance. +message Track { + // Video segment of a track. + VideoSegment segment = 1; + + // The object with timestamp and attributes per frame in the track. + repeated TimestampedObject timestamped_objects = 2; + + // Optional. Attributes in the track level. + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The confidence score of the tracked object. 
+ float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A generic detected attribute represented by name in string format. +message DetectedAttribute { + // The name of the attribute, for example, glasses, dark_glasses, mouth_open. + // A full list of supported type names will be provided in the document. + string name = 1; + + // Detected attribute confidence. Range [0, 1]. + float confidence = 2; + + // Text value of the detection result. For example, the value for "HairColor" + // can be "black", "blonde", etc. + string value = 3; +} + +// A generic detected landmark represented by name in string format and a 2D +// location. +message DetectedLandmark { + // The name of this landmark, for example, left_hand, right_shoulder. + string name = 1; + + // The 2D point of the detected landmark using the normalized image + // coordindate system. The normalized coordinates have the range from 0 to 1. + NormalizedVertex point = 2; + + // The confidence score of the detected landmark. Range [0, 1]. + float confidence = 3; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Video file location in + // [Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Video segment on which the annotation is run. + VideoSegment segment = 10; + + // Topical label annotations on video level or user-specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Presence label annotations on video level or user-specified segment level. + // There is exactly one element for each unique label. Compared to the + // existing topical `segment_label_annotations`, this field presents more + // fine-grained, segment-level labels detected in video content and is made + // available only when the client sets `LabelDetectionConfig.model` to + // "builtin/latest" in the request. 
+ repeated LabelAnnotation segment_presence_label_annotations = 23; + + // Topical label annotations on shot level. + // There is exactly one element for each unique label. + repeated LabelAnnotation shot_label_annotations = 3; + + // Presence label annotations on shot level. There is exactly one element for + // each unique label. Compared to the existing topical + // `shot_label_annotations`, this field presents more fine-grained, shot-level + // labels detected in video content and is made available only when the client + // sets `LabelDetectionConfig.model` to "builtin/latest" in the request. + repeated LabelAnnotation shot_presence_label_annotations = 24; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Deprecated. Please use `face_detection_annotations` instead. + repeated FaceAnnotation face_annotations = 5 [deprecated = true]; + + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // Speech transcription. + repeated SpeechTranscription speech_transcriptions = 11; + + // OCR text detection and tracking. + // Annotations for list of detected text snippets. Each will have list of + // frame information associated with it. + repeated TextAnnotation text_annotations = 12; + + // Annotations for list of objects detected and tracked in video. + repeated ObjectTrackingAnnotation object_annotations = 14; + + // Annotations for list of logos detected, tracked and recognized in video. + repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + + // If set, indicates an error. 
Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. Guaranteed to be + // 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; + + // Specifies which feature is being tracked if the request contains more than + // one feature. + Feature feature = 5; + + // Specifies which segment is being tracked if the request contains more than + // one segment. + VideoSegment segment = 6; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Config for SPEECH_TRANSCRIPTION. +message SpeechTranscriptionConfig { + // Required. *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. 
+ string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechTranscription`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', enables speaker detection for each recognized word in + // the top alternative of the recognition result using a speaker_tag provided + // in the WordInfo. 
+ // Note: When this is true, we send all the words from the beginning of the + // audio for the top alternative in every consecutive response. + // This is done in order to improve our speaker tags as our models learn to + // identify the speakers in the conversation over time. + bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, specifies the estimated number of speakers in the + // conversation. If not set, defaults to '2'. Ignored unless + // enable_speaker_diarization is set to true. + int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, the top result includes a list of words and the + // confidence for those words. If `false`, no word-level confidence + // information is returned. The default is `false`. + bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. 
+ repeated SpeechRecognitionAlternative alternatives = 1; + + // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag of the language in this result. This language code was + // detected to have the most likelihood of being spoken in the audio. + string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A list of word-specific information for each recognized word. + // Note: When `enable_speaker_diarization` is set to true, you will see all + // the words from the beginning of the audio. + repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +message WordInfo { + // Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. 
This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration end_time = 2; + + // The word corresponding to this set of information. + string word = 3; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A distinct integer value is assigned for every speaker within + // the audio. This field specifies which one of those speakers was detected to + // have spoken this word. Value ranges from 1 up to diarization_speaker_count, + // and is only set if speaker diarization is enabled. + int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A vertex represents a 2D point in the image. +// NOTE: the normalized vertex coordinates are relative to the original image +// and range from 0 to 1. +message NormalizedVertex { + // X coordinate. + float x = 1; + + // Y coordinate. + float y = 2; +} + +// Normalized bounding polygon for text (that might not be aligned with axis). +// Contains list of the corner points in clockwise order starting from +// top-left corner. For example, for a rectangular bounding box: +// When the text is horizontal it might look like: +// 0----1 +// | | +// 3----2 +// +// When it's clockwise rotated 180 degrees around the top-left corner it +// becomes: +// 2----3 +// | | +// 1----0 +// +// and the vertex order will still be (0, 1, 2, 3). 
Note that values can be less +// than 0, or greater than 1 due to trignometric calculations for location of +// the box. +message NormalizedBoundingPoly { + // Normalized vertices of the bounding polygon. + repeated NormalizedVertex vertices = 1; +} + +// Video segment level annotation results for text detection. +message TextSegment { + // Video segment where a text snippet was detected. + VideoSegment segment = 1; + + // Confidence for the track of detected text. It is calculated as the highest + // over all frames where OCR detected text appears. + float confidence = 2; + + // Information related to the frames where OCR detected text appears. + repeated TextFrame frames = 3; +} + +// Video frame level annotation results for text annotation (OCR). +// Contains information regarding timestamp and bounding box locations for the +// frames containing detected OCR text snippets. +message TextFrame { + // Bounding polygon of the detected text for this frame. + NormalizedBoundingPoly rotated_bounding_box = 1; + + // Timestamp of this frame. + google.protobuf.Duration time_offset = 2; +} + +// Annotations related to one detected OCR text snippet. This will contain the +// corresponding text, confidence value, and frame level information for each +// detection. +message TextAnnotation { + // The detected text. + string text = 1; + + // All video segments where OCR detected text appears. + repeated TextSegment segments = 2; + + // Feature version. + string version = 3; +} + +// Video frame level annotations for object detection and tracking. This field +// stores per frame location, time offset, and confidence. +message ObjectTrackingFrame { + // The normalized bounding box location of this object track for the frame. + NormalizedBoundingBox normalized_bounding_box = 1; + + // The timestamp of the frame in microseconds. + google.protobuf.Duration time_offset = 2; +} + +// Annotations corresponding to one tracked object. 
+message ObjectTrackingAnnotation { + // Different representation of tracking info in non-streaming batch + // and streaming modes. + oneof track_info { + // Non-streaming batch mode ONLY. + // Each object track corresponds to one video segment where it appears. + VideoSegment segment = 3; + + // Streaming mode ONLY. + // In streaming mode, we do not know the end time of a tracked object + // before it is completed. Hence, there is no VideoSegment info returned. + // Instead, we provide a unique identifiable integer track_id so that + // the customers can correlate the results of the ongoing + // ObjectTrackAnnotation of the same track_id over time. + int64 track_id = 5; + } + + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + // messages in frames. + // Streaming mode: it can only be one ObjectTrackingFrame message in frames. + repeated ObjectTrackingFrame frames = 2; + + // Feature version. + string version = 6; +} + +// Annotation corresponding to one detected, tracked and recognized logo class. +message LogoRecognitionAnnotation { + // Entity category information to specify the logo class that all the logo + // tracks within this LogoRecognitionAnnotation are recognized as. + Entity entity = 1; + + // All logo tracks where the recognized logo appears. Each track corresponds + // to one logo instance appearing in consecutive frames. + repeated Track tracks = 2; + + // All video segments where the recognized logo appears. There might be + // multiple instances of the same logo class appearing in one VideoSegment. 
+ repeated VideoSegment segments = 3; +} diff --git a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json new file mode 100644 index 00000000..ae482cdf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json @@ -0,0 +1,75 @@ +{ + "clientLibrary": { + "name": "nodejs-videointelligence", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.videointelligence.v1", + "version": "v1" + } + ] + }, + "snippets": [ + { + "regionTag": "videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async", + "title": "videointelligence annotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", + "canonical": true, + "file": "video_intelligence_service.annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 92, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService.AnnotateVideo", + "async": true, + "parameters": [ + { + "name": "input_uri", + "type": "TYPE_STRING" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + }, + { + "name": "features", + "type": "TYPE_ENUM[]" + }, + { + "name": "video_context", + "type": ".google.cloud.videointelligence.v1.VideoContext" + }, + { + "name": "output_uri", + "type": "TYPE_STRING" + }, + { + "name": "location_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "VideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceServiceClient" + }, + "method": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService.AnnotateVideo", + "service": { + "shortName": "VideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js new file mode 100644 index 00000000..caac0742 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js @@ -0,0 +1,100 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(features) { + // [START videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Input video location. Currently, only + * Cloud Storage (https://cloud.google.com/storage/) URIs are + * supported. URIs must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). To identify + * multiple videos, a video URI may include wildcards in the `object-id`. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` must be unset. + */ + // const inputUri = 'abc123' + /** + * The video data bytes. + * If unset, the input video(s) should be specified via the `input_uri`. + * If set, `input_uri` must be unset. + */ + // const inputContent = 'Buffer.from('string')' + /** + * Required. Requested video annotation features. + */ + // const features = 1234 + /** + * Additional video context and/or feature-specific parameters. 
+ */ + // const videoContext = {} + /** + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only Cloud Storage (https://cloud.google.com/storage/) + * URIs are supported. These must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). + */ + // const outputUri = 'abc123' + /** + * Optional. Cloud region where annotation should take place. Supported cloud + * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + * region is specified, the region will be determined based on video file + * location. + */ + // const locationId = 'abc123' + + // Imports the Videointelligence library + const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1; + + // Instantiates a client + const videointelligenceClient = new VideoIntelligenceServiceClient(); + + async function callAnnotateVideo() { + // Construct request + const request = { + features, + }; + + // Run request + const [operation] = await videointelligenceClient.annotateVideo(request); + const [response] = await operation.promise(); + console.log(response); + } + + callAnnotateVideo(); + // [END videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/src/index.ts b/owl-bot-staging/v1/src/index.ts new file mode 100644 index 00000000..b7940547 --- /dev/null +++ b/owl-bot-staging/v1/src/index.ts @@ -0,0 +1,25 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1 from './v1'; +const VideoIntelligenceServiceClient = v1.VideoIntelligenceServiceClient; +type VideoIntelligenceServiceClient = v1.VideoIntelligenceServiceClient; +export {v1, VideoIntelligenceServiceClient}; +export default {v1, VideoIntelligenceServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1/src/v1/gapic_metadata.json b/owl-bot-staging/v1/src/v1/gapic_metadata.json new file mode 100644 index 00000000..daf4c173 --- /dev/null +++ b/owl-bot-staging/v1/src/v1/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.videointelligence.v1", + "libraryPackage": "@google-cloud/video-intelligence", + "services": { + "VideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/src/v1/index.ts b/owl-bot-staging/v1/src/v1/index.ts new file mode 100644 index 00000000..6fcd1933 --- /dev/null +++ 
b/owl-bot-staging/v1/src/v1/index.ts @@ -0,0 +1,19 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts b/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts new file mode 100644 index 00000000..e7e287c7 --- /dev/null +++ b/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts @@ -0,0 +1,443 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1/video_intelligence_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './video_intelligence_service_client_config.json'; +import { operationsProtos } from 'google-gax'; +const version = require('../../../package.json').version; + +/** + * Service that implements the Video Intelligence API. + * @class + * @memberof v1 + */ +export class VideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of VideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). 
+ * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], + },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/locations/*}/operations',}]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const annotateVideoResponse = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1.AnnotateVideoResponse') as gax.protobuf.Type; + const annotateVideoMetadata = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1.AnnotateVideoProgress') as gax.protobuf.Type; + + this.descriptors.longrunning = { + annotateVideo: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + annotateVideoResponse.decode.bind(annotateVideoResponse), + annotateVideoMetadata.decode.bind(annotateVideoMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.videoIntelligenceServiceStub) { + return this.videoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1.VideoIntelligenceService. + this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1.VideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1.VideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const videoIntelligenceServiceStubMethods = + ['annotateVideo']; + for (const methodName of videoIntelligenceServiceStubMethods) { + const callPromise = this.videoIntelligenceServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.videoIntelligenceServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'videointelligence.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- + +/** + * Performs asynchronous video annotation. Progress and results can be + * retrieved through the `google.longrunning.Operations` interface. + * `Operation.metadata` contains `AnnotateVideoProgress` (progress). + * `Operation.response` contains `AnnotateVideoResponse` (results). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.inputUri + * Input video location. Currently, only + * [Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported. URIs must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + * multiple videos, a video URI may include wildcards in the `object-id`. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` must be unset. + * @param {Buffer} request.inputContent + * The video data bytes. + * If unset, the input video(s) should be specified via the `input_uri`. + * If set, `input_uri` must be unset. + * @param {number[]} request.features + * Required. Requested video annotation features. + * @param {google.cloud.videointelligence.v1.VideoContext} request.videoContext + * Additional video context and/or feature-specific parameters. + * @param {string} [request.outputUri] + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only [Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported. 
These must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). + * @param {string} [request.locationId] + * Optional. Cloud region where annotation should take place. Supported cloud + * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + * region is specified, the region will be determined based on video file + * location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async + */ + annotateVideo( + request?: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + annotateVideo( + request: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request?: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.annotateVideo(request, options, callback); + } +/** + * Check the status of the long running operation returned by `annotateVideo()`. + * @param {String} name + * The operation name that will be passed. + * @returns {Promise} - The promise which resolves to an object. 
+ * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async + */ + async checkAnnotateVideoProgress(name: string): Promise>{ + const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.videoIntelligenceServiceStub && !this._terminated) { + return this.videoIntelligenceServiceStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json b/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json new file mode 100644 index 00000000..49091879 --- /dev/null +++ b/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json @@ -0,0 +1,40 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1.VideoIntelligenceService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + }, + "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" + } + } + } + } +} diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json new file mode 100644 index 00000000..e28d401f --- /dev/null +++ b/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json @@ -0,0 +1,3 @@ +[ + "../../protos/google/cloud/videointelligence/v1/video_intelligence.proto" +] diff --git 
a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..85a71c33 --- /dev/null +++ b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const videointelligence = require('@google-cloud/video-intelligence'); + +function main() { + const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); +} + +main(); diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..d466c7b0 --- /dev/null +++ b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; + +// check that the client class type name can be used +function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); + doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); +} + +main(); diff --git a/owl-bot-staging/v1/system-test/install.ts b/owl-bot-staging/v1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts b/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts new file mode 100644 index 00000000..a62a0554 --- /dev/null +++ b/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts @@ -0,0 +1,259 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as videointelligenceserviceModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1.VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.videoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.videoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 
'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('annotateVideo', () => { + it('invokes annotateVideo without error', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); + const [operation] = await 
client.annotateVideo(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo without error using callback', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.annotateVideo( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes annotateVideo with call error', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); + await 
assert.rejects(client.annotateVideo(request), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo with LRO error', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.annotateVideo(request); + await assert.rejects(operation.promise(), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes checkAnnotateVideoProgress without error', async () => { + const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkAnnotateVideoProgress with error', async () => { + 
const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); +}); diff --git a/owl-bot-staging/v1/tsconfig.json b/owl-bot-staging/v1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1/webpack.config.js b/owl-bot-staging/v1/webpack.config.js new file mode 100644 index 00000000..9657601b --- /dev/null +++ b/owl-bot-staging/v1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'videointelligence', + filename: './videointelligence.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/v1beta2/.eslintignore b/owl-bot-staging/v1beta2/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1beta2/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1beta2/.eslintrc.json b/owl-bot-staging/v1beta2/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1beta2/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1beta2/.gitignore b/owl-bot-staging/v1beta2/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1beta2/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1beta2/.jsdoc.js b/owl-bot-staging/v1beta2/.jsdoc.js new file mode 100644 index 00000000..6c816e68 --- /dev/null +++ b/owl-bot-staging/v1beta2/.jsdoc.js @@ -0,0 
+1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/video-intelligence', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1beta2/.mocharc.js b/owl-bot-staging/v1beta2/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1beta2/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1beta2/.prettierrc.js b/owl-bot-staging/v1beta2/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1beta2/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1beta2/README.md b/owl-bot-staging/v1beta2/README.md new file mode 100644 index 00000000..d1c53e8c --- /dev/null +++ b/owl-bot-staging/v1beta2/README.md @@ -0,0 +1 @@ +Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1beta2/linkinator.config.json b/owl-bot-staging/v1beta2/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1beta2/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1beta2/package.json b/owl-bot-staging/v1beta2/package.json new file mode 100644 index 00000000..6b17fa2c --- /dev/null +++ b/owl-bot-staging/v1beta2/package.json @@ -0,0 +1,64 @@ +{ + "name": "@google-cloud/video-intelligence", + "version": "0.1.0", + "description": "Videointelligence client for Node.js", + "repository": "googleapis/nodejs-videointelligence", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google videointelligence", + "videointelligence", + "video intelligence service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.1.1" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto b/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto new file mode 100644 index 00000000..81648c52 --- /dev/null +++ b/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto @@ -0,0 +1,410 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1beta2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1beta2"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1beta2"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1beta2"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. 
Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI + // may include wildcards in `object-id`, and thus identify multiple videos. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. 
+message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes be included in the face annotation output. + bool include_bounding_boxes = 2; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. `Terrier` could also be + // a `pet`. 
+ repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; +} + +// Video frame level annotation results for explicit content. +message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content.. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +message NormalizedBoundingBox { + // Left X coordinate. + float left = 1; + + // Top Y coordinate. + float top = 2; + + // Right X coordinate. + float right = 3; + + // Bottom Y coordinate. + float bottom = 4; +} + +// Video segment level annotation results for face detection. +message FaceSegment { + // Video segment where a face was detected. + VideoSegment segment = 1; +} + +// Video frame level annotation results for face detection. +message FaceFrame { + // Normalized Bounding boxes in a frame. + // There can be more than one boxes if the same face is detected in multiple + // locations within the current frame. + repeated NormalizedBoundingBox normalized_bounding_boxes = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + google.protobuf.Duration time_offset = 2; +} + +// Face annotation. +message FaceAnnotation { + // Thumbnail of a representative face view (in JPEG format). 
+ bytes thumbnail = 1; + + // All video segments where a face was detected. + repeated FaceSegment segments = 2; + + // All video frames where a face was detected. + repeated FaceFrame frames = 3; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Label annotations on shot level. + // There is exactly one element for each unique label. + repeated LabelAnnotation shot_label_annotations = 3; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Face annotations. There is exactly one element for each unique face. + repeated FaceAnnotation face_annotations = 5; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. 
+ // Guaranteed to be 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection and tracking. + FACE_DETECTION = 4; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. 
+ VERY_LIKELY = 5; +} diff --git a/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json b/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json new file mode 100644 index 00000000..f1f4b140 --- /dev/null +++ b/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json @@ -0,0 +1,75 @@ +{ + "clientLibrary": { + "name": "nodejs-videointelligence", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.videointelligence.v1beta2", + "version": "v1beta2" + } + ] + }, + "snippets": [ + { + "regionTag": "videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async", + "title": "videointelligence annotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", + "canonical": true, + "file": "video_intelligence_service.annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 91, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService.AnnotateVideo", + "async": true, + "parameters": [ + { + "name": "input_uri", + "type": "TYPE_STRING" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + }, + { + "name": "features", + "type": "TYPE_ENUM[]" + }, + { + "name": "video_context", + "type": ".google.cloud.videointelligence.v1beta2.VideoContext" + }, + { + "name": "output_uri", + "type": "TYPE_STRING" + }, + { + "name": "location_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "VideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient" + }, + "method": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService.AnnotateVideo", + "service": { + "shortName": "VideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js new file mode 100644 index 00000000..6c39d8b5 --- /dev/null +++ b/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js @@ -0,0 +1,99 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(features) { + // [START videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Input video location. Currently, only + * Google Cloud Storage (https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). A video URI + * may include wildcards in `object-id`, and thus identify multiple videos. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + */ + // const inputUri = 'abc123' + /** + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + */ + // const inputContent = 'Buffer.from('string')' + /** + * Required. Requested video annotation features. + */ + // const features = 1234 + /** + * Additional video context and/or feature-specific parameters. 
+ */ + // const videoContext = {} + /** + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). + */ + // const outputUri = 'abc123' + /** + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + */ + // const locationId = 'abc123' + + // Imports the Videointelligence library + const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1beta2; + + // Instantiates a client + const videointelligenceClient = new VideoIntelligenceServiceClient(); + + async function callAnnotateVideo() { + // Construct request + const request = { + features, + }; + + // Run request + const [operation] = await videointelligenceClient.annotateVideo(request); + const [response] = await operation.promise(); + console.log(response); + } + + callAnnotateVideo(); + // [END videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta2/src/index.ts b/owl-bot-staging/v1beta2/src/index.ts new file mode 100644 index 00000000..c6f51173 --- /dev/null +++ b/owl-bot-staging/v1beta2/src/index.ts @@ -0,0 +1,25 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1beta2 from './v1beta2'; +const VideoIntelligenceServiceClient = v1beta2.VideoIntelligenceServiceClient; +type VideoIntelligenceServiceClient = v1beta2.VideoIntelligenceServiceClient; +export {v1beta2, VideoIntelligenceServiceClient}; +export default {v1beta2, VideoIntelligenceServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json b/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json new file mode 100644 index 00000000..a1dcd92a --- /dev/null +++ b/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.videointelligence.v1beta2", + "libraryPackage": "@google-cloud/video-intelligence", + "services": { + "VideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/index.ts 
b/owl-bot-staging/v1beta2/src/v1beta2/index.ts new file mode 100644 index 00000000..6fcd1933 --- /dev/null +++ b/owl-bot-staging/v1beta2/src/v1beta2/index.ts @@ -0,0 +1,19 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts new file mode 100644 index 00000000..b7792576 --- /dev/null +++ b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts @@ -0,0 +1,442 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1beta2/video_intelligence_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './video_intelligence_service_client_config.json'; +import { operationsProtos } from 'google-gax'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Video Intelligence API. + * @class + * @memberof v1beta2 + */ +export class VideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of VideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. 
+ * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1beta2/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1beta2/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1beta2/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1beta2/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}:cancel',}], + }]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const annotateVideoResponse = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse') as gax.protobuf.Type; + const annotateVideoMetadata = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress') as gax.protobuf.Type; + + this.descriptors.longrunning = { + annotateVideo: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + annotateVideoResponse.decode.bind(annotateVideoResponse), + annotateVideoMetadata.decode.bind(annotateVideoMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1beta2.VideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.videoIntelligenceServiceStub) { + return this.videoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1beta2.VideoIntelligenceService. + this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1beta2.VideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1beta2.VideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const videoIntelligenceServiceStubMethods = + ['annotateVideo']; + for (const methodName of videoIntelligenceServiceStubMethods) { + const callPromise = this.videoIntelligenceServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.videoIntelligenceServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'videointelligence.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- + +/** + * Performs asynchronous video annotation. Progress and results can be + * retrieved through the `google.longrunning.Operations` interface. + * `Operation.metadata` contains `AnnotateVideoProgress` (progress). + * `Operation.response` contains `AnnotateVideoResponse` (results). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.inputUri + * Input video location. Currently, only + * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI + * may include wildcards in `object-id`, and thus identify multiple videos. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + * @param {Buffer} request.inputContent + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + * @param {number[]} request.features + * Required. Requested video annotation features. + * @param {google.cloud.videointelligence.v1beta2.VideoContext} request.videoContext + * Additional video context and/or feature-specific parameters. + * @param {string} [request.outputUri] + * Optional. Location where the output (in JSON format) should be stored. 
+ * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). + * @param {string} [request.locationId] + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1beta2/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async + */ + annotateVideo( + request?: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + annotateVideo( + request: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request?: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.annotateVideo(request, options, callback); + } +/** + * Check the status of the long running operation returned by `annotateVideo()`. + * @param {String} name + * The operation name that will be passed. 
+ * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1beta2/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async + */ + async checkAnnotateVideoProgress(name: string): Promise>{ + const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.videoIntelligenceServiceStub && !this._terminated) { + return this.videoIntelligenceServiceStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json new file mode 100644 index 00000000..f1fd51a8 --- /dev/null +++ b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json @@ -0,0 +1,40 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1beta2.VideoIntelligenceService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + }, + "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" + } + } + } + } +} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json new file mode 100644 index 00000000..02f0a919 --- /dev/null +++ b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json @@ -0,0 +1,3 @@ +[ + 
"../../protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto" +] diff --git a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..85a71c33 --- /dev/null +++ b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const videointelligence = require('@google-cloud/video-intelligence'); + +function main() { + const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); +} + +main(); diff --git a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..d466c7b0 --- /dev/null +++ b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; + +// check that the client class type name can be used +function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); + doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); +} + +main(); diff --git a/owl-bot-staging/v1beta2/system-test/install.ts b/owl-bot-staging/v1beta2/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1beta2/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts b/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts new file mode 100644 index 00000000..aacaf8dc --- /dev/null +++ b/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts @@ -0,0 +1,259 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as videointelligenceserviceModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1beta2.VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.videoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.videoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + 
credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('annotateVideo', () => { + it('invokes annotateVideo without error', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = 
stubLongRunningCall(expectedResponse); + const [operation] = await client.annotateVideo(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo without error using callback', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.annotateVideo( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes annotateVideo with call error', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + 
client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.annotateVideo(request), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo with LRO error', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.annotateVideo(request); + await assert.rejects(operation.promise(), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes checkAnnotateVideoProgress without error', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as 
SinonStub).getCall(0)); + }); + + it('invokes checkAnnotateVideoProgress with error', async () => { + const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); +}); diff --git a/owl-bot-staging/v1beta2/tsconfig.json b/owl-bot-staging/v1beta2/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1beta2/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1beta2/webpack.config.js b/owl-bot-staging/v1beta2/webpack.config.js new file mode 100644 index 00000000..9657601b --- /dev/null +++ b/owl-bot-staging/v1beta2/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'videointelligence', + filename: './videointelligence.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/v1p1beta1/.eslintignore b/owl-bot-staging/v1p1beta1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1p1beta1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1p1beta1/.eslintrc.json b/owl-bot-staging/v1p1beta1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1p1beta1/.gitignore b/owl-bot-staging/v1p1beta1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1p1beta1/.jsdoc.js b/owl-bot-staging/v1p1beta1/.jsdoc.js new file mode 100644 index 00000000..6c816e68 --- /dev/null +++ 
b/owl-bot-staging/v1p1beta1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/video-intelligence', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1p1beta1/.mocharc.js b/owl-bot-staging/v1p1beta1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1p1beta1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1p1beta1/.prettierrc.js b/owl-bot-staging/v1p1beta1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1p1beta1/README.md b/owl-bot-staging/v1p1beta1/README.md new file mode 100644 index 00000000..d1c53e8c --- /dev/null +++ b/owl-bot-staging/v1p1beta1/README.md @@ -0,0 +1 @@ +Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p1beta1/linkinator.config.json b/owl-bot-staging/v1p1beta1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1p1beta1/package.json b/owl-bot-staging/v1p1beta1/package.json new file mode 100644 index 00000000..6b17fa2c --- /dev/null +++ b/owl-bot-staging/v1p1beta1/package.json @@ -0,0 +1,64 @@ +{ + "name": "@google-cloud/video-intelligence", + "version": "0.1.0", + "description": "Videointelligence client for Node.js", + "repository": "googleapis/nodejs-videointelligence", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google videointelligence", + "videointelligence", + "video intelligence service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.1.1" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto b/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto new file mode 100644 index 00000000..3c0b8b56 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto @@ -0,0 +1,450 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p1beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p1beta1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1p1beta1"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p1beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. 
Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI + // may include wildcards in `object-id`, and thus identify multiple videos. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Video segment. 
+message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. `Terrier` could also be + // a `pet`. + repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; +} + +// Video frame level annotation results for explicit content. 
+message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content.. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Output only. Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Label annotations on shot level. + // There is exactly one element for each unique label. + repeated LabelAnnotation shot_label_annotations = 3; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // Speech transcription. + repeated SpeechTranscription speech_transcriptions = 11; + + // Output only. If set, indicates an error. Note that for a single + // `AnnotateVideoRequest` some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. 
+message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Output only. Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Output only. Approximate percentage processed thus far. Guaranteed to be + // 100 when fully processed. + int32 progress_percent = 2; + + // Output only. Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Output only. Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Config for SPEECH_TRANSCRIPTION. +message SpeechTranscriptionConfig { + // Required. *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechTranscription`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. 
+message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Output only. Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A list of word-specific information for each recognized word. + repeated WordInfo words = 3; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +message WordInfo { + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. 
+ google.protobuf.Duration end_time = 2; + + // Output only. The word corresponding to this set of information. + string word = 3; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. 
+ VERY_LIKELY = 5; +} diff --git a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json new file mode 100644 index 00000000..62b702ba --- /dev/null +++ b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json @@ -0,0 +1,75 @@ +{ + "clientLibrary": { + "name": "nodejs-videointelligence", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.videointelligence.v1p1beta1", + "version": "v1p1beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async", + "title": "videointelligence annotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", + "canonical": true, + "file": "video_intelligence_service.annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 91, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService.AnnotateVideo", + "async": true, + "parameters": [ + { + "name": "input_uri", + "type": "TYPE_STRING" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + }, + { + "name": "features", + "type": "TYPE_ENUM[]" + }, + { + "name": "video_context", + "type": ".google.cloud.videointelligence.v1p1beta1.VideoContext" + }, + { + "name": "output_uri", + "type": "TYPE_STRING" + }, + { + "name": "location_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "VideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient" + }, + "method": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService.AnnotateVideo", + "service": { + "shortName": "VideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js new file mode 100644 index 00000000..ccf9f33c --- /dev/null +++ b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js @@ -0,0 +1,99 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(features) { + // [START videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Input video location. Currently, only + * Google Cloud Storage (https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). A video URI + * may include wildcards in `object-id`, and thus identify multiple videos. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + */ + // const inputUri = 'abc123' + /** + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + */ + // const inputContent = 'Buffer.from('string')' + /** + * Required. Requested video annotation features. + */ + // const features = 1234 + /** + * Additional video context and/or feature-specific parameters. 
+ */ + // const videoContext = {} + /** + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). + */ + // const outputUri = 'abc123' + /** + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + */ + // const locationId = 'abc123' + + // Imports the Videointelligence library + const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p1beta1; + + // Instantiates a client + const videointelligenceClient = new VideoIntelligenceServiceClient(); + + async function callAnnotateVideo() { + // Construct request + const request = { + features, + }; + + // Run request + const [operation] = await videointelligenceClient.annotateVideo(request); + const [response] = await operation.promise(); + console.log(response); + } + + callAnnotateVideo(); + // [END videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p1beta1/src/index.ts b/owl-bot-staging/v1p1beta1/src/index.ts new file mode 100644 index 00000000..25d2cfaa --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/index.ts @@ -0,0 +1,25 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1p1beta1 from './v1p1beta1'; +const VideoIntelligenceServiceClient = v1p1beta1.VideoIntelligenceServiceClient; +type VideoIntelligenceServiceClient = v1p1beta1.VideoIntelligenceServiceClient; +export {v1p1beta1, VideoIntelligenceServiceClient}; +export default {v1p1beta1, VideoIntelligenceServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json new file mode 100644 index 00000000..624b1379 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.videointelligence.v1p1beta1", + "libraryPackage": "@google-cloud/video-intelligence", + "services": { + "VideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts 
b/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts new file mode 100644 index 00000000..6fcd1933 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts @@ -0,0 +1,19 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts new file mode 100644 index 00000000..faf95026 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts @@ -0,0 +1,442 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1p1beta1/video_intelligence_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './video_intelligence_service_client_config.json'; +import { operationsProtos } from 'google-gax'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Video Intelligence API. + * @class + * @memberof v1p1beta1 + */ +export class VideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of VideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. 
+ * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p1beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p1beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p1beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p1beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], + }]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const annotateVideoResponse = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse') as gax.protobuf.Type; + const annotateVideoMetadata = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress') as gax.protobuf.Type; + + this.descriptors.longrunning = { + annotateVideo: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + annotateVideoResponse.decode.bind(annotateVideoResponse), + annotateVideoMetadata.decode.bind(annotateVideoMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.videoIntelligenceServiceStub) { + return this.videoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService. + this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+    const videoIntelligenceServiceStubMethods =
+        ['annotateVideo'];
+    for (const methodName of videoIntelligenceServiceStubMethods) {
+      const callPromise = this.videoIntelligenceServiceStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.longrunning[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.videoIntelligenceServiceStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    return 'videointelligence.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath(),
+   * exists for compatibility reasons.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    return 'videointelligence.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/cloud-platform'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+/**
+ * Performs asynchronous video annotation. Progress and results can be
+ * retrieved through the `google.longrunning.Operations` interface.
+ * `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ * `Operation.response` contains `AnnotateVideoResponse` (results).
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.inputUri
+ *   Input video location. Currently, only
+ *   [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
+ *   supported, which must be specified in the following format:
+ *   `gs://bucket-id/object-id` (other URI formats return
+ *   {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For
+ *   more information, see [Request
+ *   URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI
+ *   may include wildcards in `object-id`, and thus identify multiple videos.
+ *   Supported wildcards: '*' to match 0 or more characters;
+ *   '?' to match 1 character. If unset, the input video should be embedded
+ *   in the request as `input_content`. If set, `input_content` should be unset.
+ * @param {Buffer} request.inputContent
+ *   The video data bytes.
+ *   If unset, the input video(s) should be specified via `input_uri`.
+ *   If set, `input_uri` should be unset.
+ * @param {number[]} request.features
+ *   Required. Requested video annotation features.
+ * @param {google.cloud.videointelligence.v1p1beta1.VideoContext} request.videoContext
+ *   Additional video context and/or feature-specific parameters.
+ * @param {string} [request.outputUri]
+ *   Optional. Location where the output (in JSON format) should be stored.
+ * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). + * @param {string} [request.locationId] + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js
+ * region_tag:videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async
+ */
+  annotateVideo(
+      request?: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+      options?: CallOptions):
+      Promise<[
+        LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>;
+  annotateVideo(
+      request: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+      options: CallOptions,
+      callback: Callback<
+          LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  annotateVideo(
+      request: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+      callback: Callback<
+          LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>): void;
+  annotateVideo(
+      request?: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+          protos.google.longrunning.IOperation|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        LROperation<protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoProgress>,
+        protos.google.longrunning.IOperation|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    this.initialize();
+    return this.innerApiCalls.annotateVideo(request, options, callback);
+  }
+/**
+ * Check the status of the long running operation returned by `annotateVideo()`.
+ * @param {String} name
+ *   The operation name that will be passed.
+ * @returns {Promise} - The promise which resolves to an object.
+ *   The decoded operation object has result and metadata field to get information from.
+ *   Please see the
+ *   [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
+ *   for more details and examples.
+ * @example include:samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js
+ * region_tag:videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async
+ */
+  async checkAnnotateVideoProgress(name: string): Promise<LROperation<protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress>>{
+    const request = new operationsProtos.google.longrunning.GetOperationRequest({name});
+    const [operation] = await this.operationsClient.getOperation(request);
+    const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings());
+    return decodeOperation as LROperation<protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse, protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+ */ + close(): Promise { + if (this.videoIntelligenceServiceStub && !this._terminated) { + return this.videoIntelligenceServiceStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json new file mode 100644 index 00000000..5d0c24db --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json @@ -0,0 +1,40 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + }, + "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" + } + } + } + } +} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json new file mode 100644 index 00000000..4213216a --- /dev/null +++ b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json @@ -0,0 +1,3 @@ +[ + 
"../../protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto" +] diff --git a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..85a71c33 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const videointelligence = require('@google-cloud/video-intelligence'); + +function main() { + const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); +} + +main(); diff --git a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..d466c7b0 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; + +// check that the client class type name can be used +function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); + doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); +} + +main(); diff --git a/owl-bot-staging/v1p1beta1/system-test/install.ts b/owl-bot-staging/v1p1beta1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts b/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts new file mode 100644 index 00000000..e54dd58d --- /dev/null +++ b/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts @@ -0,0 +1,259 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as videointelligenceserviceModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1p1beta1.VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.videoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.videoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new 
videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('annotateVideo', () => { + it('invokes annotateVideo without error', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); + const [operation] = await client.annotateVideo(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo without error using callback', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.annotateVideo( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes annotateVideo with call error', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: 
{headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.annotateVideo(request), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo with LRO error', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.annotateVideo(request); + await assert.rejects(operation.promise(), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes checkAnnotateVideoProgress without error', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + 
assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkAnnotateVideoProgress with error', async () => { + const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); +}); diff --git a/owl-bot-staging/v1p1beta1/tsconfig.json b/owl-bot-staging/v1p1beta1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1p1beta1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1p1beta1/webpack.config.js b/owl-bot-staging/v1p1beta1/webpack.config.js new file mode 100644 index 00000000..9657601b --- /dev/null +++ b/owl-bot-staging/v1p1beta1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'videointelligence', + filename: './videointelligence.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/v1p2beta1/.eslintignore b/owl-bot-staging/v1p2beta1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1p2beta1/.eslintrc.json b/owl-bot-staging/v1p2beta1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1p2beta1/.gitignore b/owl-bot-staging/v1p2beta1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1p2beta1/.jsdoc.js 
b/owl-bot-staging/v1p2beta1/.jsdoc.js new file mode 100644 index 00000000..6c816e68 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/video-intelligence', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1p2beta1/.mocharc.js b/owl-bot-staging/v1p2beta1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1p2beta1/.prettierrc.js b/owl-bot-staging/v1p2beta1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1p2beta1/README.md b/owl-bot-staging/v1p2beta1/README.md new file mode 100644 index 00000000..d1c53e8c --- /dev/null +++ b/owl-bot-staging/v1p2beta1/README.md @@ -0,0 +1 @@ +Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p2beta1/linkinator.config.json b/owl-bot-staging/v1p2beta1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1p2beta1/package.json b/owl-bot-staging/v1p2beta1/package.json new file mode 100644 index 00000000..6b17fa2c --- /dev/null +++ b/owl-bot-staging/v1p2beta1/package.json @@ -0,0 +1,64 @@ +{ + "name": "@google-cloud/video-intelligence", + "version": "0.1.0", + "description": "Videointelligence client for Node.js", + "repository": "googleapis/nodejs-videointelligence", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google videointelligence", + "videointelligence", + "video intelligence service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.1.1" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto b/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto new file mode 100644 index 00000000..c185c0aa --- /dev/null +++ b/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto @@ -0,0 +1,489 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p2beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P2Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p2beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p2beta1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p2beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. 
Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for TEXT_DETECTION. 
+message TextDetectionConfig { + // Language hint can be specified if the language to be detected is known a + // priori. It can increase the accuracy of the detection. Language hint must + // be language code in BCP-47 format. + // + // Automatic language detection is performed if no hint is provided. + repeated string language_hints = 1; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. 
`Terrier` could also be
+// a `pet`.
+  repeated Entity category_entities = 2;
+
+  // All video segments where a label was detected.
+  repeated LabelSegment segments = 3;
+
+  // All video frames where a label was detected.
+  repeated LabelFrame frames = 4;
+}
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+  // Left X coordinate.
+  float left = 1;
+
+  // Top Y coordinate.
+  float top = 2;
+
+  // Right X coordinate.
+  float right = 3;
+
+  // Bottom Y coordinate.
+  float bottom = 4;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Label annotations on video level or user specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Label annotations on shot level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation shot_label_annotations = 3;
+
+  // Label annotations on frame level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation frame_label_annotations = 4;
+
+  // Shot annotations. 
Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // OCR text detection and tracking. + // Annotations for list of detected text snippets. Each will have list of + // frame information associated with it. + repeated TextAnnotation text_annotations = 12; + + // Annotations for list of objects detected and tracked in video. + repeated ObjectTrackingAnnotation object_annotations = 14; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. Guaranteed to be + // 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// A vertex represents a 2D point in the image. 
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+  // X coordinate.
+  float x = 1;
+
+  // Y coordinate.
+  float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with axis).
+// Contains list of the corner points in clockwise order starting from
+// top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+//         0----1
+//         |    |
+//         3----2
+//
+// When it's clockwise rotated 180 degrees around the top-left corner it
+// becomes:
+//         2----3
+//         |    |
+//         1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+// than 0, or greater than 1 due to trigonometric calculations for location of
+// the box.
+message NormalizedBoundingPoly {
+  // Normalized vertices of the bounding polygon.
+  repeated NormalizedVertex vertices = 1;
+}
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+  // Video segment where a text snippet was detected.
+  VideoSegment segment = 1;
+
+  // Confidence for the track of detected text. It is calculated as the highest
+  // over all frames where OCR detected text appears.
+  float confidence = 2;
+
+  // Information related to the frames where OCR detected text appears.
+  repeated TextFrame frames = 3;
+}
+
+// Video frame level annotation results for text annotation (OCR).
+// Contains information regarding timestamp and bounding box locations for the
+// frames containing detected OCR text snippets.
+message TextFrame {
+  // Bounding polygon of the detected text for this frame.
+  NormalizedBoundingPoly rotated_bounding_box = 1;
+
+  // Timestamp of this frame.
+  google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations related to one detected OCR text snippet. This will contain the
+// corresponding text, confidence value, and frame level information for each
+// detection. 
+message TextAnnotation { + // The detected text. + string text = 1; + + // All video segments where OCR detected text appears. + repeated TextSegment segments = 2; +} + +// Video frame level annotations for object detection and tracking. This field +// stores per frame location, time offset, and confidence. +message ObjectTrackingFrame { + // The normalized bounding box location of this object track for the frame. + NormalizedBoundingBox normalized_bounding_box = 1; + + // The timestamp of the frame in microseconds. + google.protobuf.Duration time_offset = 2; +} + +// Annotations corresponding to one tracked object. +message ObjectTrackingAnnotation { + // Different representation of tracking info in non-streaming batch + // and streaming modes. + oneof track_info { + // Non-streaming batch mode ONLY. + // Each object track corresponds to one video segment where it appears. + VideoSegment segment = 3; + + // Streaming mode ONLY. + // In streaming mode, we do not know the end time of a tracked object + // before it is completed. Hence, there is no VideoSegment info returned. + // Instead, we provide a unique identifiable integer track_id so that + // the customers can correlate the results of the ongoing + // ObjectTrackAnnotation of the same track_id over time. + int64 track_id = 5; + } + + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + repeated ObjectTrackingFrame frames = 2; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // OCR text detection and tracking. 
+ TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} diff --git a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json new file mode 100644 index 00000000..253c9b0b --- /dev/null +++ b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json @@ -0,0 +1,75 @@ +{ + "clientLibrary": { + "name": "nodejs-videointelligence", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.videointelligence.v1p2beta1", + "version": "v1p2beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async", + "title": "videointelligence annotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", + "canonical": true, + "file": "video_intelligence_service.annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 89, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService.AnnotateVideo", + "async": true, + "parameters": [ + { + "name": "input_uri", + "type": "TYPE_STRING" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + }, + { + "name": "features", + "type": "TYPE_ENUM[]" + }, + { + "name": "video_context", + "type": ".google.cloud.videointelligence.v1p2beta1.VideoContext" + }, + { + "name": "output_uri", + "type": "TYPE_STRING" + }, + { + "name": "location_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "VideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient" + }, + "method": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService.AnnotateVideo", + "service": { + "shortName": "VideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js new file mode 100644 index 00000000..f4e003e8 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js @@ -0,0 +1,97 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(features) { + // [START videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Input video location. Currently, only + * Google Cloud Storage (https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For more information, see + * Request URIs (https://cloud.google.com/storage/docs/request-endpoints). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + */ + // const inputUri = 'abc123' + /** + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + */ + // const inputContent = 'Buffer.from('string')' + /** + * Required. Requested video annotation features. + */ + // const features = 1234 + /** + * Additional video context and/or feature-specific parameters. 
+ */ + // const videoContext = {} + /** + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For more information, see + * Request URIs (https://cloud.google.com/storage/docs/request-endpoints). + */ + // const outputUri = 'abc123' + /** + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + */ + // const locationId = 'abc123' + + // Imports the Videointelligence library + const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p2beta1; + + // Instantiates a client + const videointelligenceClient = new VideoIntelligenceServiceClient(); + + async function callAnnotateVideo() { + // Construct request + const request = { + features, + }; + + // Run request + const [operation] = await videointelligenceClient.annotateVideo(request); + const [response] = await operation.promise(); + console.log(response); + } + + callAnnotateVideo(); + // [END videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p2beta1/src/index.ts b/owl-bot-staging/v1p2beta1/src/index.ts new file mode 100644 index 00000000..33d5acc3 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/index.ts @@ -0,0 +1,25 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1p2beta1 from './v1p2beta1'; +const VideoIntelligenceServiceClient = v1p2beta1.VideoIntelligenceServiceClient; +type VideoIntelligenceServiceClient = v1p2beta1.VideoIntelligenceServiceClient; +export {v1p2beta1, VideoIntelligenceServiceClient}; +export default {v1p2beta1, VideoIntelligenceServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json new file mode 100644 index 00000000..701b895c --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json @@ -0,0 +1,33 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.videointelligence.v1p2beta1", + "libraryPackage": "@google-cloud/video-intelligence", + "services": { + "VideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts 
b/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts new file mode 100644 index 00000000..6fcd1933 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts @@ -0,0 +1,19 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts new file mode 100644 index 00000000..6376067c --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts @@ -0,0 +1,440 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1p2beta1/video_intelligence_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './video_intelligence_service_client_config.json'; +import { operationsProtos } from 'google-gax'; +const version = require('../../../package.json').version; + +/** + * Service that implements Google Cloud Video Intelligence API. + * @class + * @memberof v1p2beta1 + */ +export class VideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of VideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. 
+ * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p2beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p2beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p2beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p2beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], + }]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const annotateVideoResponse = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse') as gax.protobuf.Type; + const annotateVideoMetadata = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress') as gax.protobuf.Type; + + this.descriptors.longrunning = { + annotateVideo: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + annotateVideoResponse.decode.bind(annotateVideoResponse), + annotateVideoMetadata.decode.bind(annotateVideoMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.videoIntelligenceServiceStub) { + return this.videoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService. + this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const videoIntelligenceServiceStubMethods = + ['annotateVideo']; + for (const methodName of videoIntelligenceServiceStubMethods) { + const callPromise = this.videoIntelligenceServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.longrunning[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.videoIntelligenceServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'videointelligence.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform' + ]; + } + + getProjectId(): Promise; + getProjectId(callback: Callback): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. 
+ */ + getProjectId(callback?: Callback): + Promise|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- + +/** + * Performs asynchronous video annotation. Progress and results can be + * retrieved through the `google.longrunning.Operations` interface. + * `Operation.metadata` contains `AnnotateVideoProgress` (progress). + * `Operation.response` contains `AnnotateVideoResponse` (results). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.inputUri + * Input video location. Currently, only + * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + * @param {Buffer} request.inputContent + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + * @param {number[]} request.features + * Required. Requested video annotation features. + * @param {google.cloud.videointelligence.v1p2beta1.VideoContext} request.videoContext + * Additional video context and/or feature-specific parameters. + * @param {string} [request.outputUri] + * Optional. Location where the output (in JSON format) should be stored. 
+ * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + * @param {string} [request.locationId] + * Optional. Cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async + */ + annotateVideo( + request?: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, + options?: CallOptions): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>; + annotateVideo( + request: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, + options: CallOptions, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, + callback: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): void; + annotateVideo( + request?: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, + optionsOrCallback?: CallOptions|Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>, + callback?: Callback< + LROperation, + protos.google.longrunning.IOperation|null|undefined, + {}|null|undefined>): + Promise<[ + LROperation, + protos.google.longrunning.IOperation|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.annotateVideo(request, options, callback); + } +/** + * Check the status of the long running operation returned by `annotateVideo()`. + * @param {String} name + * The operation name that will be passed. 
+ * @returns {Promise} - The promise which resolves to an object. + * The decoded operation object has result and metadata field to get information from. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. + * @example include:samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js + * region_tag:videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async + */ + async checkAnnotateVideoProgress(name: string): Promise>{ + const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); + const [operation] = await this.operationsClient.getOperation(request); + const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); + return decodeOperation as LROperation; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
+ */ + close(): Promise { + if (this.videoIntelligenceServiceStub && !this._terminated) { + return this.videoIntelligenceServiceStub.then(stub => { + this._terminated = true; + stub.close(); + this.operationsClient.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json new file mode 100644 index 00000000..20e27528 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json @@ -0,0 +1,40 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + }, + "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" + } + } + } + } +} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json new file mode 100644 index 00000000..64abb974 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json @@ -0,0 +1,3 @@ +[ + 
"../../protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto" +] diff --git a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..85a71c33 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const videointelligence = require('@google-cloud/video-intelligence'); + +function main() { + const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); +} + +main(); diff --git a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..d466c7b0 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,32 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; + +// check that the client class type name can be used +function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); + doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); +} + +main(); diff --git a/owl-bot-staging/v1p2beta1/system-test/install.ts b/owl-bot-staging/v1p2beta1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts b/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts new file mode 100644 index 00000000..6793631d --- /dev/null +++ b/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts @@ -0,0 +1,259 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as videointelligenceserviceModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1p2beta1.VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.videoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.videoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new 
videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('annotateVideo', () => { + it('invokes annotateVideo without error', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); + const [operation] = await client.annotateVideo(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo without error using callback', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.annotateVideo( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes annotateVideo with call error', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: 
{headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.annotateVideo(request), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo with LRO error', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.annotateVideo(request); + await assert.rejects(operation.promise(), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes checkAnnotateVideoProgress without error', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + 
assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkAnnotateVideoProgress with error', async () => { + const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); +}); diff --git a/owl-bot-staging/v1p2beta1/tsconfig.json b/owl-bot-staging/v1p2beta1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1p2beta1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1p2beta1/webpack.config.js b/owl-bot-staging/v1p2beta1/webpack.config.js new file mode 100644 index 00000000..9657601b --- /dev/null +++ b/owl-bot-staging/v1p2beta1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'videointelligence', + filename: './videointelligence.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/owl-bot-staging/v1p3beta1/.eslintignore b/owl-bot-staging/v1p3beta1/.eslintignore new file mode 100644 index 00000000..cfc348ec --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.eslintignore @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/owl-bot-staging/v1p3beta1/.eslintrc.json b/owl-bot-staging/v1p3beta1/.eslintrc.json new file mode 100644 index 00000000..78215349 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/owl-bot-staging/v1p3beta1/.gitignore b/owl-bot-staging/v1p3beta1/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/owl-bot-staging/v1p3beta1/.jsdoc.js 
b/owl-bot-staging/v1p3beta1/.jsdoc.js new file mode 100644 index 00000000..6c816e68 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2022 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/video-intelligence', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/owl-bot-staging/v1p3beta1/.mocharc.js b/owl-bot-staging/v1p3beta1/.mocharc.js new file mode 100644 index 00000000..481c522b --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.mocharc.js @@ -0,0 +1,33 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/owl-bot-staging/v1p3beta1/.prettierrc.js b/owl-bot-staging/v1p3beta1/.prettierrc.js new file mode 100644 index 00000000..494e1478 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/.prettierrc.js @@ -0,0 +1,22 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/owl-bot-staging/v1p3beta1/README.md b/owl-bot-staging/v1p3beta1/README.md new file mode 100644 index 00000000..d1c53e8c --- /dev/null +++ b/owl-bot-staging/v1p3beta1/README.md @@ -0,0 +1 @@ +Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p3beta1/linkinator.config.json b/owl-bot-staging/v1p3beta1/linkinator.config.json new file mode 100644 index 00000000..befd23c8 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/linkinator.config.json @@ -0,0 +1,16 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io", + "https://console.cloud.google.com/cloudshell", + "https://support.google.com" + ], + "silent": true, + "concurrency": 5, + "retry": true, + "retryErrors": true, + "retryErrorsCount": 5, + "retryErrorsJitter": 3000 +} diff --git a/owl-bot-staging/v1p3beta1/package.json b/owl-bot-staging/v1p3beta1/package.json new file mode 100644 index 00000000..ad2f8dab --- /dev/null +++ b/owl-bot-staging/v1p3beta1/package.json @@ -0,0 +1,65 @@ +{ + "name": "@google-cloud/video-intelligence", + "version": "0.1.0", + "description": "Videointelligence client for Node.js", + "repository": "googleapis/nodejs-videointelligence", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google videointelligence", + "videointelligence", + "streaming video intelligence service", + "video intelligence service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . 
&& cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^3.1.1" + }, + "devDependencies": { + "@types/mocha": "^9.1.0", + "@types/node": "^16.0.0", + "@types/sinon": "^10.0.8", + "c8": "^7.11.0", + "gts": "^3.1.0", + "jsdoc": "^3.6.7", + "jsdoc-fresh": "^1.1.1", + "jsdoc-region-tag": "^1.3.1", + "linkinator": "^3.0.0", + "mocha": "^9.1.4", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^13.0.0", + "ts-loader": "^9.2.6", + "typescript": "^4.5.5", + "webpack": "^5.67.0", + "webpack-cli": "^4.9.1" + }, + "engines": { + "node": ">=v12" + } +} diff --git a/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto new file mode 100644 index 00000000..db039e67 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto @@ -0,0 +1,1090 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.videointelligence.v1p3beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P3Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p3beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1p3beta1"; + +// Service that implements the Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p3beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Service that implements streaming Video Intelligence API. 
+service StreamingVideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs video annotation with bidirectional streaming: emitting results + // while sending video/audio bytes. + // This method is only available via the gRPC API (not REST). + rpc StreamingAnnotateVideo(stream StreamingAnnotateVideoRequest) + returns (stream StreamingAnnotateVideoResponse) {} +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. URIs must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` must be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. 
These must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; + + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. 
+  FRAME_MODE = 2;
+
+  // Detect both shot-level and frame-level labels.
+  SHOT_AND_FRAME_MODE = 3;
+}
+
+// Bucketized representation of likelihood.
+enum Likelihood {
+  // Unspecified likelihood.
+  LIKELIHOOD_UNSPECIFIED = 0;
+
+  // Very unlikely.
+  VERY_UNLIKELY = 1;
+
+  // Unlikely.
+  UNLIKELY = 2;
+
+  // Possible.
+  POSSIBLE = 3;
+
+  // Likely.
+  LIKELY = 4;
+
+  // Very likely.
+  VERY_LIKELY = 5;
+}
+
+// Config for LABEL_DETECTION.
+message LabelDetectionConfig {
+  // What labels should be detected with LABEL_DETECTION, in addition to
+  // video-level labels or segment-level labels.
+  // If unspecified, defaults to `SHOT_MODE`.
+  LabelDetectionMode label_detection_mode = 1;
+
+  // Whether the video has been shot from a stationary (i.e., non-moving)
+  // camera. When set to true, might improve detection accuracy for moving
+  // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
+  bool stationary_camera = 2;
+
+  // Model to use for label detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 3;
+
+  // The confidence threshold we perform filtering on the labels from
+  // frame-level detection. If not set, it is set to 0.4 by default. The valid
+  // range for this threshold is [0.1, 0.9]. Any value set outside of this
+  // range will be clipped.
+  // Note: For best results, follow the default threshold. We will update
+  // the default threshold every time we release a new model.
+  float frame_confidence_threshold = 4;
+
+  // The confidence threshold we perform filtering on the labels from
+  // video-level and shot-level detections. If not set, it's set to 0.3 by
+  // default. The valid range for this threshold is [0.1, 0.9]. Any value set
+  // outside of this range will be clipped.
+  // Note: For best results, follow the default threshold. We will update
+  // the default threshold every time we release a new model.
+ float video_confidence_threshold = 5; +} + +// Streaming video annotation feature. +enum StreamingFeature { + // Unspecified. + STREAMING_FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + STREAMING_LABEL_DETECTION = 1; + + // Shot change detection. + STREAMING_SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + STREAMING_EXPLICIT_CONTENT_DETECTION = 3; + + // Object detection and tracking. + STREAMING_OBJECT_TRACKING = 4; + + // Action recognition based on AutoML model. + STREAMING_AUTOML_ACTION_RECOGNITION = 23; + + // Video classification based on AutoML model. + STREAMING_AUTOML_CLASSIFICATION = 21; + + // Object detection and tracking based on AutoML model. + STREAMING_AUTOML_OBJECT_TRACKING = 22; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection. + FACE_DETECTION = 4; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; + + // Logo detection, tracking, and recognition. + LOGO_RECOGNITION = 12; + + // Celebrity recognition. + CELEBRITY_RECOGNITION = 13; + + // Person detection. + PERSON_DETECTION = 14; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. 
+message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes are included in the face annotation output. + bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes are included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is set to false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as cloth color (black, + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, + // etc. + // Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 3; +} + +// Config for TEXT_DETECTION. +message TextDetectionConfig { + // Language hint can be specified if the language to be detected is known a + // priori. It can increase the accuracy of the detection. Language hint must + // be language code in BCP-47 format. + // + // Automatic language detection is performed if no hint is provided. + repeated string language_hints = 1; + + // Model to use for text detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 2; +} + +// Video segment. 
+message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g., `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // For example, when the label is `Terrier`, the category is likely `dog`. And + // in some cases there might be more than one categories e.g., `Terrier` could + // also be a `pet`. + repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; +} + +// Video frame level annotation results for explicit content. 
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+  // Left X coordinate.
+  float left = 1;
+
+  // Top Y coordinate.
+  float top = 2;
+
+  // Right X coordinate.
+  float right = 3;
+
+  // Bottom Y coordinate.
+  float bottom = 4;
+}
+
+// For tracking related features.
+// An object at time_offset with attributes, and located with
+// normalized_bounding_box.
+message TimestampedObject {
+  // Normalized Bounding box in a frame, where the object is located.
+  NormalizedBoundingBox normalized_bounding_box = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the video frame for this object.
+  google.protobuf.Duration time_offset = 2;
+
+  // Optional. The attributes of the object in the bounding box.
+  repeated DetectedAttribute attributes = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The detected landmarks.
+  repeated DetectedLandmark landmarks = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A track of an object instance.
+message Track {
+  // Video segment of a track.
+  VideoSegment segment = 1;
+
+  // The object with timestamp and attributes per frame in the track.
+  repeated TimestampedObject timestamped_objects = 2;
+
+  // Optional. Attributes in the track level.
+ repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The confidence score of the tracked object. + float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A generic detected attribute represented by name in string format. +message DetectedAttribute { + // The name of the attribute, for example, glasses, dark_glasses, mouth_open. + // A full list of supported type names will be provided in the document. + string name = 1; + + // Detected attribute confidence. Range [0, 1]. + float confidence = 2; + + // Text value of the detection result. For example, the value for "HairColor" + // can be "black", "blonde", etc. + string value = 3; +} + +// Celebrity definition. +message Celebrity { + // The resource name of the celebrity. Have the format + // `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery. + // kg-mid is the id in Google knowledge graph, which is unique for the + // celebrity. + string name = 1; + + // The celebrity name. + string display_name = 2; + + // Textual description of additional information about the celebrity, if + // applicable. + string description = 3; +} + +// The annotation result of a celebrity face track. RecognizedCelebrity field +// could be empty if the face track does not have any matched celebrities. +message CelebrityTrack { + // The recognized celebrity with confidence score. + message RecognizedCelebrity { + // The recognized celebrity. + Celebrity celebrity = 1; + + // Recognition confidence. Range [0, 1]. + float confidence = 2; + } + + // Top N match of the celebrities for the face in this track. + repeated RecognizedCelebrity celebrities = 1; + + // A track of a person's face. + Track face_track = 3; +} + +// Celebrity recognition annotation per video. +message CelebrityRecognitionAnnotation { + // The tracks detected from the input video, including recognized celebrities + // and other detected faces in the video. 
+  repeated CelebrityTrack celebrity_tracks = 1;
+}
+
+// A generic detected landmark represented by name in string format and a 2D
+// location.
+message DetectedLandmark {
+  // The name of this landmark, for example, left_hand, right_shoulder.
+  string name = 1;
+
+  // The 2D point of the detected landmark using the normalized image
+  // coordinate system. The normalized coordinates have the range from 0 to 1.
+  NormalizedVertex point = 2;
+
+  // The confidence score of the detected landmark. Range [0, 1].
+  float confidence = 3;
+}
+
+// Face detection annotation.
+message FaceDetectionAnnotation {
+  // The face tracks with attributes.
+  repeated Track tracks = 3;
+
+  // The thumbnail of a person's face.
+  bytes thumbnail = 4;
+}
+
+// Person detection annotation per video.
+message PersonDetectionAnnotation {
+  // The detected tracks of a person.
+  repeated Track tracks = 1;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Video file location in
+  // [Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Video segment on which the annotation is run.
+  VideoSegment segment = 10;
+
+  // Topical label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Presence label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label. Compared to the
+  // existing topical `segment_label_annotations`, this field presents more
+  // fine-grained, segment-level labels detected in video content and is made
+  // available only when the client sets `LabelDetectionConfig.model` to
+  // "builtin/latest" in the request.
+  repeated LabelAnnotation segment_presence_label_annotations = 23;
+
+  // Topical label annotations on shot level.
+  // There is exactly one element for each unique label.
+ repeated LabelAnnotation shot_label_annotations = 3; + + // Presence label annotations on shot level. There is exactly one element for + // each unique label. Compared to the existing topical + // `shot_label_annotations`, this field presents more fine-grained, shot-level + // labels detected in video content and is made available only when the client + // sets `LabelDetectionConfig.model` to "builtin/latest" in the request. + repeated LabelAnnotation shot_presence_label_annotations = 24; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // Speech transcription. + repeated SpeechTranscription speech_transcriptions = 11; + + // OCR text detection and tracking. + // Annotations for list of detected text snippets. Each will have list of + // frame information associated with it. + repeated TextAnnotation text_annotations = 12; + + // Annotations for list of objects detected and tracked in video. + repeated ObjectTrackingAnnotation object_annotations = 14; + + // Annotations for list of logos detected, tracked and recognized in video. + repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + + // Celebrity recognition annotations. + CelebrityRecognitionAnnotation celebrity_recognition_annotations = 21; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. 
Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. Guaranteed to be + // 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; + + // Specifies which feature is being tracked if the request contains more than + // one feature. + Feature feature = 5; + + // Specifies which segment is being tracked if the request contains more than + // one segment. + VideoSegment segment = 6; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Config for SPEECH_TRANSCRIPTION. +message SpeechTranscriptionConfig { + // Required. *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Maximum number of recognition hypotheses to be returned. 
+ // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechTranscription`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', enables speaker detection for each recognized word in + // the top alternative of the recognition result using a speaker_tag provided + // in the WordInfo. + // Note: When this is true, we send all the words from the beginning of the + // audio for the top alternative in every consecutive response. 
+ // This is done in order to improve our speaker tags as our models learn to + // identify the speakers in the conversation over time. + bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, specifies the estimated number of speakers in the + // conversation. If not set, defaults to '2'. Ignored unless + // enable_speaker_diarization is set to true. + int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, the top result includes a list of words and the + // confidence for those words. If `false`, no word-level confidence + // information is returned. The default is `false`. + bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; + + // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag of the language in this result. 
This language code was + // detected to have the most likelihood of being spoken in the audio. + string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A list of word-specific information for each recognized word. + // Note: When `enable_speaker_diarization` is set to true, you will see all + // the words from the beginning of the audio. + repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +message WordInfo { + // Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. 
+ google.protobuf.Duration end_time = 2; + + // The word corresponding to this set of information. + string word = 3; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A distinct integer value is assigned for every speaker within + // the audio. This field specifies which one of those speakers was detected to + // have spoken this word. Value ranges from 1 up to diarization_speaker_count, + // and is only set if speaker diarization is enabled. + int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A vertex represents a 2D point in the image. +// NOTE: the normalized vertex coordinates are relative to the original image +// and range from 0 to 1. +message NormalizedVertex { + // X coordinate. + float x = 1; + + // Y coordinate. + float y = 2; +} + +// Normalized bounding polygon for text (that might not be aligned with axis). +// Contains list of the corner points in clockwise order starting from +// top-left corner. For example, for a rectangular bounding box: +// When the text is horizontal it might look like: +// 0----1 +// | | +// 3----2 +// +// When it's clockwise rotated 180 degrees around the top-left corner it +// becomes: +// 2----3 +// | | +// 1----0 +// +// and the vertex order will still be (0, 1, 2, 3). Note that values can be less +// than 0, or greater than 1 due to trignometric calculations for location of +// the box. +message NormalizedBoundingPoly { + // Normalized vertices of the bounding polygon. 
+ repeated NormalizedVertex vertices = 1; +} + +// Video segment level annotation results for text detection. +message TextSegment { + // Video segment where a text snippet was detected. + VideoSegment segment = 1; + + // Confidence for the track of detected text. It is calculated as the highest + // over all frames where OCR detected text appears. + float confidence = 2; + + // Information related to the frames where OCR detected text appears. + repeated TextFrame frames = 3; +} + +// Video frame level annotation results for text annotation (OCR). +// Contains information regarding timestamp and bounding box locations for the +// frames containing detected OCR text snippets. +message TextFrame { + // Bounding polygon of the detected text for this frame. + NormalizedBoundingPoly rotated_bounding_box = 1; + + // Timestamp of this frame. + google.protobuf.Duration time_offset = 2; +} + +// Annotations related to one detected OCR text snippet. This will contain the +// corresponding text, confidence value, and frame level information for each +// detection. +message TextAnnotation { + // The detected text. + string text = 1; + + // All video segments where OCR detected text appears. + repeated TextSegment segments = 2; +} + +// Video frame level annotations for object detection and tracking. This field +// stores per frame location, time offset, and confidence. +message ObjectTrackingFrame { + // The normalized bounding box location of this object track for the frame. + NormalizedBoundingBox normalized_bounding_box = 1; + + // The timestamp of the frame in microseconds. + google.protobuf.Duration time_offset = 2; +} + +// Annotations corresponding to one tracked object. +message ObjectTrackingAnnotation { + // Different representation of tracking info in non-streaming batch + // and streaming modes. + oneof track_info { + // Non-streaming batch mode ONLY. + // Each object track corresponds to one video segment where it appears. 
+ VideoSegment segment = 3; + + // Streaming mode ONLY. + // In streaming mode, we do not know the end time of a tracked object + // before it is completed. Hence, there is no VideoSegment info returned. + // Instead, we provide a unique identifiable integer track_id so that + // the customers can correlate the results of the ongoing + // ObjectTrackAnnotation of the same track_id over time. + int64 track_id = 5; + } + + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + // messages in frames. + // Streaming mode: it can only be one ObjectTrackingFrame message in frames. + repeated ObjectTrackingFrame frames = 2; +} + +// Annotation corresponding to one detected, tracked and recognized logo class. +message LogoRecognitionAnnotation { + // Entity category information to specify the logo class that all the logo + // tracks within this LogoRecognitionAnnotation are recognized as. + Entity entity = 1; + + // All logo tracks where the recognized logo appears. Each track corresponds + // to one logo instance appearing in consecutive frames. + repeated Track tracks = 2; + + // All video segments where the recognized logo appears. There might be + // multiple instances of the same logo class appearing in one VideoSegment. + repeated VideoSegment segments = 3; +} + +// The top-level message sent by the client for the `StreamingAnnotateVideo` +// method. Multiple `StreamingAnnotateVideoRequest` messages are sent. +// The first message must only contain a `StreamingVideoConfig` message. +// All subsequent messages must only contain `input_content` data. +message StreamingAnnotateVideoRequest { + // *Required* The streaming request, which is either a streaming config or + // video content. 
+ oneof streaming_request { + // Provides information to the annotator, specifing how to process the + // request. The first `AnnotateStreamingVideoRequest` message must only + // contain a `video_config` message. + StreamingVideoConfig video_config = 1; + + // The video data to be annotated. Chunks of video data are sequentially + // sent in `StreamingAnnotateVideoRequest` messages. Except the initial + // `StreamingAnnotateVideoRequest` message containing only + // `video_config`, all subsequent `AnnotateStreamingVideoRequest` + // messages must only contain `input_content` field. + // Note: as with all bytes fields, protobuffers use a pure binary + // representation (not base64). + bytes input_content = 2; + } +} + +// Provides information to the annotator that specifies how to process the +// request. +message StreamingVideoConfig { + // Config for requested annotation feature. + oneof streaming_config { + // Config for STREAMING_SHOT_CHANGE_DETECTION. + StreamingShotChangeDetectionConfig shot_change_detection_config = 2; + + // Config for STREAMING_LABEL_DETECTION. + StreamingLabelDetectionConfig label_detection_config = 3; + + // Config for STREAMING_EXPLICIT_CONTENT_DETECTION. + StreamingExplicitContentDetectionConfig explicit_content_detection_config = + 4; + + // Config for STREAMING_OBJECT_TRACKING. + StreamingObjectTrackingConfig object_tracking_config = 5; + + // Config for STREAMING_AUTOML_ACTION_RECOGNITION. + StreamingAutomlActionRecognitionConfig automl_action_recognition_config = + 23; + + // Config for STREAMING_AUTOML_CLASSIFICATION. + StreamingAutomlClassificationConfig automl_classification_config = 21; + + // Config for STREAMING_AUTOML_OBJECT_TRACKING. + StreamingAutomlObjectTrackingConfig automl_object_tracking_config = 22; + } + + // Requested annotation feature. + StreamingFeature feature = 1; + + // Streaming storage option. By default: storage is disabled. 
+ StreamingStorageConfig storage_config = 30; +} + +// `StreamingAnnotateVideoResponse` is the only message returned to the client +// by `StreamingAnnotateVideo`. A series of zero or more +// `StreamingAnnotateVideoResponse` messages are streamed back to the client. +message StreamingAnnotateVideoResponse { + // If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. + google.rpc.Status error = 1; + + // Streaming annotation results. + StreamingVideoAnnotationResults annotation_results = 2; + + // Google Cloud Storage(GCS) URI that stores annotation results of one + // streaming session in JSON format. + // It is the annotation_result_storage_directory + // from the request followed by '/cloud_project_number-session_id'. + string annotation_results_uri = 3; +} + +// Streaming annotation results corresponding to a portion of the video +// that is currently being processed. +message StreamingVideoAnnotationResults { + // Shot annotation results. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 1; + + // Label annotation results. + repeated LabelAnnotation label_annotations = 2; + + // Explicit content annotation results. + ExplicitContentAnnotation explicit_annotation = 3; + + // Object tracking results. + repeated ObjectTrackingAnnotation object_annotations = 4; +} + +// Config for STREAMING_SHOT_CHANGE_DETECTION. +message StreamingShotChangeDetectionConfig {} + +// Config for STREAMING_LABEL_DETECTION. +message StreamingLabelDetectionConfig { + // Whether the video has been captured from a stationary (i.e. non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Default: false. + bool stationary_camera = 1; +} + +// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. +message StreamingExplicitContentDetectionConfig {} + +// Config for STREAMING_OBJECT_TRACKING. 
+message StreamingObjectTrackingConfig {} + +// Config for STREAMING_AUTOML_ACTION_RECOGNITION. +message StreamingAutomlActionRecognitionConfig { + // Resource name of AutoML model. + // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} + +// Config for STREAMING_AUTOML_CLASSIFICATION. +message StreamingAutomlClassificationConfig { + // Resource name of AutoML model. + // Format: + // `projects/{project_number}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} + +// Config for STREAMING_AUTOML_OBJECT_TRACKING. +message StreamingAutomlObjectTrackingConfig { + // Resource name of AutoML model. + // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} + +// Config for streaming storage option. +message StreamingStorageConfig { + // Enable streaming storage. Default: false. + bool enable_storage_annotation_result = 1; + + // Cloud Storage URI to store all annotation results for one client. Client + // should specify this field as the top-level storage directory. Annotation + // results of different sessions will be put into different sub-directories + // denoted by project_name and session_id. All sub-directories will be auto + // generated by program and will be made accessible to client in response + // proto. URIs must be specified in the following format: + // `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage + // bucket created by client and bucket permission shall also be configured + // properly. `object-id` can be arbitrary string that make sense to client. + // Other URI formats will return error and cause Cloud Storage write failure. 
+ string annotation_result_storage_directory = 3; +} diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json new file mode 100644 index 00000000..3bde5e7a --- /dev/null +++ b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json @@ -0,0 +1,119 @@ +{ + "clientLibrary": { + "name": "nodejs-videointelligence", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.videointelligence.v1p3beta1", + "version": "v1p3beta1" + } + ] + }, + "snippets": [ + { + "regionTag": "videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async", + "title": "videointelligence streamingAnnotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs video annotation with bidirectional streaming: emitting results while sending video/audio bytes. 
This method is only available via the gRPC API (not REST).", + "canonical": true, + "file": "streaming_video_intelligence_service.streaming_annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 65, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "StreamingAnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.StreamingAnnotateVideo", + "async": true, + "parameters": [ + { + "name": "video_config", + "type": ".google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + } + ], + "resultType": ".google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse", + "client": { + "shortName": "StreamingVideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient" + }, + "method": { + "shortName": "StreamingAnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.StreamingAnnotateVideo", + "service": { + "shortName": "StreamingVideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService" + } + } + } + }, + { + "regionTag": "videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async", + "title": "videointelligence annotateVideo Sample", + "origin": "API_DEFINITION", + "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", + "canonical": true, + "file": "video_intelligence_service.annotate_video.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 92, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.AnnotateVideo", + "async": true, + "parameters": [ + { + "name": "input_uri", + "type": "TYPE_STRING" + }, + { + "name": "input_content", + "type": "TYPE_BYTES" + }, + { + "name": "features", + "type": "TYPE_ENUM[]" + }, + { + "name": "video_context", + "type": ".google.cloud.videointelligence.v1p3beta1.VideoContext" + }, + { + "name": "output_uri", + "type": "TYPE_STRING" + }, + { + "name": "location_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.longrunning.Operation", + "client": { + "shortName": "VideoIntelligenceServiceClient", + "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient" + }, + "method": { + "shortName": "AnnotateVideo", + "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.AnnotateVideo", + "service": { + "shortName": "VideoIntelligenceService", + "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService" + } + } + } + } + ] +} diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js new file mode 100644 index 00000000..2236cb17 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js @@ -0,0 +1,73 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Provides information to the annotator, specifing how to process the + * request. The first `AnnotateStreamingVideoRequest` message must only + * contain a `video_config` message. + */ + // const videoConfig = {} + /** + * The video data to be annotated. Chunks of video data are sequentially + * sent in `StreamingAnnotateVideoRequest` messages. Except the initial + * `StreamingAnnotateVideoRequest` message containing only + * `video_config`, all subsequent `AnnotateStreamingVideoRequest` + * messages must only contain `input_content` field. + * Note: as with all bytes fields, protobuffers use a pure binary + * representation (not base64). 
+ */ + // const inputContent = 'Buffer.from('string')' + + // Imports the Videointelligence library + const {StreamingVideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p3beta1; + + // Instantiates a client + const videointelligenceClient = new StreamingVideoIntelligenceServiceClient(); + + async function callStreamingAnnotateVideo() { + // Construct request + const request = { + }; + + // Run request + const stream = await videointelligenceClient.streamingAnnotateVideo(); + stream.on('data', (response) => { console.log(response) }); + stream.on('error', (err) => { throw(err) }); + stream.on('end', () => { /* API call completed */ }); + stream.write(request); + stream.end(); + } + + callStreamingAnnotateVideo(); + // [END videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js new file mode 100644 index 00000000..4f56a02a --- /dev/null +++ b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js @@ -0,0 +1,100 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(features) { + // [START videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Input video location. Currently, only + * Cloud Storage (https://cloud.google.com/storage/) URIs are + * supported. URIs must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). To identify + * multiple videos, a video URI may include wildcards in the `object-id`. + * Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` must be unset. + */ + // const inputUri = 'abc123' + /** + * The video data bytes. + * If unset, the input video(s) should be specified via the `input_uri`. + * If set, `input_uri` must be unset. + */ + // const inputContent = 'Buffer.from('string')' + /** + * Required. Requested video annotation features. + */ + // const features = 1234 + /** + * Additional video context and/or feature-specific parameters. + */ + // const videoContext = {} + /** + * Optional. Location where the output (in JSON format) should be stored. + * Currently, only Cloud Storage (https://cloud.google.com/storage/) + * URIs are supported. These must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). 
For + * more information, see Request + * URIs (https://cloud.google.com/storage/docs/request-endpoints). + */ + // const outputUri = 'abc123' + /** + * Optional. Cloud region where annotation should take place. Supported cloud + * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + * region is specified, the region will be determined based on video file + * location. + */ + // const locationId = 'abc123' + + // Imports the Videointelligence library + const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p3beta1; + + // Instantiates a client + const videointelligenceClient = new VideoIntelligenceServiceClient(); + + async function callAnnotateVideo() { + // Construct request + const request = { + features, + }; + + // Run request + const [operation] = await videointelligenceClient.annotateVideo(request); + const [response] = await operation.promise(); + console.log(response); + } + + callAnnotateVideo(); + // [END videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p3beta1/src/index.ts b/owl-bot-staging/v1p3beta1/src/index.ts new file mode 100644 index 00000000..0b13624e --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/index.ts @@ -0,0 +1,27 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v1p3beta1 from './v1p3beta1'; +const StreamingVideoIntelligenceServiceClient = v1p3beta1.StreamingVideoIntelligenceServiceClient; +type StreamingVideoIntelligenceServiceClient = v1p3beta1.StreamingVideoIntelligenceServiceClient; +const VideoIntelligenceServiceClient = v1p3beta1.VideoIntelligenceServiceClient; +type VideoIntelligenceServiceClient = v1p3beta1.VideoIntelligenceServiceClient; +export {v1p3beta1, StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient}; +export default {v1p3beta1, StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json new file mode 100644 index 00000000..eac12ff5 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json @@ -0,0 +1,51 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "typescript", + "protoPackage": "google.cloud.videointelligence.v1p3beta1", + "libraryPackage": "@google-cloud/video-intelligence", + "services": { + "StreamingVideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "StreamingVideoIntelligenceServiceClient", + "rpcs": { + "StreamingAnnotateVideo": { + "methods": [ + "streamingAnnotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "StreamingVideoIntelligenceServiceClient", + "rpcs": {} + } + } + }, + "VideoIntelligenceService": { + "clients": { + "grpc": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + 
"AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + }, + "grpc-fallback": { + "libraryClient": "VideoIntelligenceServiceClient", + "rpcs": { + "AnnotateVideo": { + "methods": [ + "annotateVideo" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts new file mode 100644 index 00000000..a8ec5240 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts @@ -0,0 +1,20 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +export {StreamingVideoIntelligenceServiceClient} from './streaming_video_intelligence_service_client'; +export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts new file mode 100644 index 00000000..436e6858 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts @@ -0,0 +1,331 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; + +import { PassThrough } from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1p3beta1/streaming_video_intelligence_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. 
+ */ +import * as gapicConfig from './streaming_video_intelligence_service_client_config.json'; + +const version = require('../../../package.json').version; + +/** + * Service that implements streaming Video Intelligence API. + * @class + * @memberof v1p3beta1 + */ +export class StreamingVideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + streamingVideoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of StreamingVideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. 
+ * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof StreamingVideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? 
gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service provide streaming responses. + // Provide descriptors for these. + this.descriptors.stream = { + streamingAnnotateVideo: new this._gaxModule.StreamDescriptor(gax.StreamType.BIDI_STREAMING, opts.fallback === 'rest') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.streamingVideoIntelligenceServiceStub) { + return this.streamingVideoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService. + this.streamingVideoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? 
+ (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const streamingVideoIntelligenceServiceStubMethods = + ['streamingAnnotateVideo']; + for (const methodName of streamingVideoIntelligenceServiceStubMethods) { + const callPromise = this.streamingVideoIntelligenceServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + if (methodName in this.descriptors.stream) { + const stream = new PassThrough(); + setImmediate(() => { + stream.emit('error', new GoogleError('The client has already been closed.')); + }); + return stream; + } + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.stream[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.streamingVideoIntelligenceServiceStub; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + return 'videointelligence.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + + /** + * The port for this API service. 
+ * @returns {number} The default port for this service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ * @returns {string[]} List of default scopes.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/cloud-platform'
+ ];
+ }
+
+ getProjectId(): Promise<string>;
+ getProjectId(callback: Callback<string, undefined, undefined>): void;
+ /**
+ * Return the project ID used by this class.
+ * @returns {Promise} A promise that resolves to string containing the project ID.
+ */
+ getProjectId(callback?: Callback<string, undefined, undefined>):
+ Promise<string>|void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+
+/**
+ * Performs video annotation with bidirectional streaming: emitting results
+ * while sending video/audio bytes.
+ * This method is only available via the gRPC API (not REST).
+ *
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which is both readable and writable. It accepts objects
+ * representing [StreamingAnnotateVideoRequest]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest} for write() method, and
+ * will emit objects representing [StreamingAnnotateVideoResponse]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse} on 'data' event asynchronously.
+ * Please see the
+ * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#bi-directional-streaming)
+ * for more details and examples.
+ * @example include:samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js
+ * region_tag:videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async
+ */
+ streamingAnnotateVideo(
+ options?: CallOptions):
+ gax.CancellableStream {
+ this.initialize();
+ return this.innerApiCalls.streamingAnnotateVideo(null, options);
+ }
+
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.streamingVideoIntelligenceServiceStub && !this._terminated) {
+ return this.streamingVideoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
new file mode 100644
index 00000000..b569dbe8
--- /dev/null
+++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
@@ -0,0 +1,31 @@
+{
+ "interfaces": {
+ "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "StreamingAnnotateVideo": {
+ "timeout_millis": 10800000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git
a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json new file mode 100644 index 00000000..85fbf375 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json @@ -0,0 +1,3 @@ +[ + "../../protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto" +] diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts new file mode 100644 index 00000000..a9deb59b --- /dev/null +++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts @@ -0,0 +1,443 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); +/** + * Client JSON configuration object, loaded from + * `src/v1p3beta1/video_intelligence_service_client_config.json`. 
+ * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './video_intelligence_service_client_config.json'; +import { operationsProtos } from 'google-gax'; +const version = require('../../../package.json').version; + +/** + * Service that implements the Video Intelligence API. + * @class + * @memberof v1p3beta1 + */ +export class VideoIntelligenceServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + operationsClient: gax.OperationsClient; + videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of VideoIntelligenceServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. 
+ * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + */ + constructor(opts?: ClientOptions) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; + const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. 
+ if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gax.fallback : gax; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = staticMembers.servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === staticMembers.servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process !== 'undefined' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest' ) { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); + // This API contains "long-running operations", which return a + // an Operation object that allows for tracking of the operation, + // rather than holding a request open. 
+ const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p3beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p3beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p3beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}',}], + },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p3beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], + }]; + } + this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); + const annotateVideoResponse = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse') as gax.protobuf.Type; + const annotateVideoMetadata = protoFilesRoot.lookup( + '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress') as gax.protobuf.Type; + + this.descriptors.longrunning = { + annotateVideo: new this._gaxModule.LongrunningDescriptor( + this.operationsClient, + annotateVideoResponse.decode.bind(annotateVideoResponse), + annotateVideoMetadata.decode.bind(annotateVideoMetadata)) + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = gax.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.videoIntelligenceServiceStub) { + return this.videoIntelligenceServiceStub; + } + + // Put together the "service stub" for + // google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService. + this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
+ const videoIntelligenceServiceStubMethods =
+ ['annotateVideo'];
+ for (const methodName of videoIntelligenceServiceStubMethods) {
+ const callPromise = this.videoIntelligenceServiceStub.then(
+ stub => (...args: Array<{}>) => {
+ if (this._terminated) {
+ return Promise.reject('The client has already been closed.');
+ }
+ const func = stub[methodName];
+ return func.apply(stub, args);
+ },
+ (err: Error|null|undefined) => () => {
+ throw err;
+ });
+
+ const descriptor =
+ this.descriptors.longrunning[methodName] ||
+ undefined;
+ const apiCall = this._gaxModule.createApiCall(
+ callPromise,
+ this._defaults[methodName],
+ descriptor
+ );
+
+ this.innerApiCalls[methodName] = apiCall;
+ }
+
+ return this.videoIntelligenceServiceStub;
+ }
+
+ /**
+ * The DNS address for this API service.
+ * @returns {string} The DNS address for this service.
+ */
+ static get servicePath() {
+ return 'videointelligence.googleapis.com';
+ }
+
+ /**
+ * The DNS address for this API service - same as servicePath(),
+ * exists for compatibility reasons.
+ * @returns {string} The DNS address for this service.
+ */
+ static get apiEndpoint() {
+ return 'videointelligence.googleapis.com';
+ }
+
+ /**
+ * The port for this API service.
+ * @returns {number} The default port for this service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ * @returns {string[]} List of default scopes.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/cloud-platform'
+ ];
+ }
+
+ getProjectId(): Promise<string>;
+ getProjectId(callback: Callback<string, undefined, undefined>): void;
+ /**
+ * Return the project ID used by this class.
+ * @returns {Promise} A promise that resolves to string containing the project ID.
+ */
+ getProjectId(callback?: Callback<string, undefined, undefined>):
+ Promise<string>|void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+
+/**
+ * Performs asynchronous video annotation. Progress and results can be
+ * retrieved through the `google.longrunning.Operations` interface.
+ * `Operation.metadata` contains `AnnotateVideoProgress` (progress).
+ * `Operation.response` contains `AnnotateVideoResponse` (results).
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.inputUri
+ * Input video location. Currently, only
+ * [Cloud Storage](https://cloud.google.com/storage/) URIs are
+ * supported. URIs must be specified in the following format:
+ * `gs://bucket-id/object-id` (other URI formats return
+ * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For
+ * more information, see [Request
+ * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+ * multiple videos, a video URI may include wildcards in the `object-id`.
+ * Supported wildcards: '*' to match 0 or more characters;
+ * '?' to match 1 character. If unset, the input video should be embedded
+ * in the request as `input_content`. If set, `input_content` must be unset.
+ * @param {Buffer} request.inputContent
+ * The video data bytes.
+ * If unset, the input video(s) should be specified via the `input_uri`.
+ * If set, `input_uri` must be unset.
+ * @param {number[]} request.features
+ * Required. Requested video annotation features.
+ * @param {google.cloud.videointelligence.v1p3beta1.VideoContext} request.videoContext
+ * Additional video context and/or feature-specific parameters.
+ * @param {string} [request.outputUri]
+ * Optional. Location where the output (in JSON format) should be stored.
+ * Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+ * URIs are supported.
These must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For + * more information, see [Request + * URIs](https://cloud.google.com/storage/docs/request-endpoints). + * @param {string} [request.locationId] + * Optional. Cloud region where annotation should take place. Supported cloud + * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + * region is specified, the region will be determined based on video file + * location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing + * a long running operation. Its `promise()` method returns a promise + * you can `await` for. + * Please see the + * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) + * for more details and examples. 
+ * @example include:samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js
+ * region_tag:videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async
+ */
+ annotateVideo(
+ request?: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ options?: CallOptions):
+ Promise<[
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|undefined, {}|undefined
+ ]>;
+ annotateVideo(
+ request: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ options: CallOptions,
+ callback: Callback<
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|null|undefined,
+ {}|null|undefined>): void;
+ annotateVideo(
+ request: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ callback: Callback<
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|null|undefined,
+ {}|null|undefined>): void;
+ annotateVideo(
+ request?: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest,
+ optionsOrCallback?: CallOptions|Callback<
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|null|undefined,
+ {}|null|undefined>,
+ callback?: Callback<
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|null|undefined,
+ {}|null|undefined>):
+ Promise<[
+ LROperation<protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoProgress>,
+ protos.google.longrunning.IOperation|undefined, {}|undefined
+ ]>|void {
+ request = request || {};
+ let options: CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
+ options = {};
+ }
+ else {
+ options = optionsOrCallback as CallOptions;
+ }
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ this.initialize();
+ return this.innerApiCalls.annotateVideo(request, options, callback);
+ }
+/**
+ * Check the status of the long running operation returned by `annotateVideo()`.
+ * @param {String} name
+ * The operation name that will be passed.
+ * @returns {Promise} - The promise which resolves to an object.
+ * The decoded operation object has result and metadata field to get information from.
+ * Please see the
+ * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
+ * for more details and examples.
+ * @example include:samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js
+ * region_tag:videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async
+ */
+ async checkAnnotateVideoProgress(name: string): Promise<LROperation<protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress>>{
+ const request = new operationsProtos.google.longrunning.GetOperationRequest({name});
+ const [operation] = await this.operationsClient.getOperation(request);
+ const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings());
+ return decodeOperation as LROperation<protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse, protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.videoIntelligenceServiceStub && !this._terminated) {
+ return this.videoIntelligenceServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ this.operationsClient.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json
new file mode 100644
index 00000000..c9796e48
--- /dev/null
+++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json
@@ -0,0 +1,40 @@
+{
+ "interfaces": {
+ "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ },
+ "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": {
+ "initial_retry_delay_millis": 1000,
+ "retry_delay_multiplier": 2.5,
+ "max_retry_delay_millis": 120000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "AnnotateVideo": {
+ "timeout_millis": 600000,
+ "retry_codes_name": "idempotent",
+ "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3"
+ }
+ }
+ }
+ }
+}
diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json
new file mode 100644
index 00000000..85fbf375
--- /dev/null
+++ b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json
@@ -0,0 +1,3 @@
+[
+
"../../protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto" +] diff --git a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js new file mode 100644 index 00000000..aafb91c9 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js @@ -0,0 +1,28 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const videointelligence = require('@google-cloud/video-intelligence'); + +function main() { + const streamingVideoIntelligenceServiceClient = new videointelligence.StreamingVideoIntelligenceServiceClient(); + const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); +} + +main(); diff --git a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts new file mode 100644 index 00000000..94514c94 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts @@ -0,0 +1,38 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import {StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; + +// check that the client class type name can be used +function doStuffWithStreamingVideoIntelligenceServiceClient(client: StreamingVideoIntelligenceServiceClient) { + client.close(); +} +function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const streamingVideoIntelligenceServiceClient = new StreamingVideoIntelligenceServiceClient(); + doStuffWithStreamingVideoIntelligenceServiceClient(streamingVideoIntelligenceServiceClient); + // check that the client instance can be created + const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); + doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); +} + +main(); diff --git a/owl-bot-staging/v1p3beta1/system-test/install.ts b/owl-bot-staging/v1p3beta1/system-test/install.ts new file mode 100644 index 00000000..8ec45222 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/system-test/install.ts @@ -0,0 +1,49 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import { packNTest } from 'pack-n-play'; +import { readFileSync } from 'fs'; +import { describe, it } from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts b/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts new file mode 100644 index 00000000..2a1b730c --- /dev/null +++ b/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts @@ -0,0 +1,195 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as streamingvideointelligenceserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubBidiStreamingCall(response?: ResponseType, error?: Error) { + const transformStub = error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + return sinon.stub().returns(mockStream); +} + +describe('v1p3beta1.StreamingVideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + 
}); + + it('has initialize method and supports deferred initialization', async () => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.streamingVideoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.streamingVideoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.streamingVideoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.streamingVideoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new 
streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('streamingAnnotateVideo', () => { + it('invokes streamingAnnotateVideo without error', async () => { + const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest()); + const expectedResponse = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse()); + client.innerApiCalls.streamingAnnotateVideo = stubBidiStreamingCall(expectedResponse); + const stream = client.streamingAnnotateVideo(); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + stream.write(request); + stream.end(); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.streamingAnnotateVideo as SinonStub) + .getCall(0).calledWith(null)); + assert.deepStrictEqual(((stream as unknown as PassThrough) + ._transform as SinonStub).getCall(0).args[0], request); + }); + + it('invokes streamingAnnotateVideo with error', async () => { + const client = new 
streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest()); + const expectedError = new Error('expected'); + client.innerApiCalls.streamingAnnotateVideo = stubBidiStreamingCall(undefined, expectedError); + const stream = client.streamingAnnotateVideo(); + const promise = new Promise((resolve, reject) => { + stream.on('data', (response: protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) => { + resolve(response); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + stream.write(request); + stream.end(); + }); + await assert.rejects(promise, expectedError); + assert((client.innerApiCalls.streamingAnnotateVideo as SinonStub) + .getCall(0).calledWith(null)); + assert.deepStrictEqual(((stream as unknown as PassThrough) + ._transform as SinonStub).getCall(0).args[0], request); + }); + }); +}); diff --git a/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts b/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts new file mode 100644 index 00000000..fc99736f --- /dev/null +++ b/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts @@ -0,0 +1,259 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import { describe, it } from 'mocha'; +import * as videointelligenceserviceModule from '../src'; + +import {protobuf, LROperation, operationsProtos} from 'google-gax'; + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); +} + +function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { + const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); + const mockOperation = { + promise: innerStub, + }; + return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); +} + +describe('v1p3beta1.VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + await client.initialize(); + assert(client.videoIntelligenceServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.videoIntelligenceServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new 
videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.videoIntelligenceServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + + describe('annotateVideo', () => { + it('invokes annotateVideo without error', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); + const [operation] = await client.annotateVideo(request); + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo without error using callback', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); + client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.annotateVideo( + request, + (err?: Error|null, + result?: LROperation|null + ) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const operation = await promise as LROperation; + const [response] = await operation.promise(); + assert.deepStrictEqual(response, expectedResponse); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); + }); + + it('invokes annotateVideo with call error', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: 
{headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); + await assert.rejects(client.annotateVideo(request), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes annotateVideo with LRO error', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); + const expectedOptions = {otherArgs: {headers: {}}};; + const expectedError = new Error('expected'); + client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); + const [operation] = await client.annotateVideo(request); + await assert.rejects(operation.promise(), expectedError); + assert((client.innerApiCalls.annotateVideo as SinonStub) + .getCall(0).calledWith(request, expectedOptions, undefined)); + }); + + it('invokes checkAnnotateVideoProgress without error', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); + expectedResponse.name = 'test'; + expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; + expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} + + client.operationsClient.getOperation = stubSimpleCall(expectedResponse); + const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); + assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); + 
assert(decodedOperation.metadata); + assert((client.operationsClient.getOperation as SinonStub).getCall(0)); + }); + + it('invokes checkAnnotateVideoProgress with error', async () => { + const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const expectedError = new Error('expected'); + + client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); + assert((client.operationsClient.getOperation as SinonStub) + .getCall(0)); + }); + }); +}); diff --git a/owl-bot-staging/v1p3beta1/tsconfig.json b/owl-bot-staging/v1p3beta1/tsconfig.json new file mode 100644 index 00000000..c78f1c88 --- /dev/null +++ b/owl-bot-staging/v1p3beta1/tsconfig.json @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/owl-bot-staging/v1p3beta1/webpack.config.js b/owl-bot-staging/v1p3beta1/webpack.config.js new file mode 100644 index 00000000..9657601b --- /dev/null +++ b/owl-bot-staging/v1p3beta1/webpack.config.js @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'videointelligence', + filename: './videointelligence.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; From e5fcf60a5c909ddf7f7c6d039fa68fe81b956fad Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 24 Jun 2022 09:55:44 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot=20po?= =?UTF-8?q?st-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- owl-bot-staging/v1/.eslintignore | 7 - owl-bot-staging/v1/.eslintrc.json | 3 - owl-bot-staging/v1/.gitignore | 14 - owl-bot-staging/v1/.jsdoc.js | 55 - owl-bot-staging/v1/.mocharc.js | 33 - owl-bot-staging/v1/.prettierrc.js | 22 - owl-bot-staging/v1/README.md | 1 - owl-bot-staging/v1/linkinator.config.json | 16 - owl-bot-staging/v1/package.json | 64 - .../v1/video_intelligence.proto | 906 -------------- ...ata.google.cloud.videointelligence.v1.json | 75 -- ...deo_intelligence_service.annotate_video.js | 100 -- owl-bot-staging/v1/src/index.ts | 25 - owl-bot-staging/v1/src/v1/gapic_metadata.json | 33 - 
owl-bot-staging/v1/src/v1/index.ts | 19 - .../v1/video_intelligence_service_client.ts | 443 ------- ...eo_intelligence_service_client_config.json | 40 - ...video_intelligence_service_proto_list.json | 3 - .../system-test/fixtures/sample/src/index.js | 27 - .../system-test/fixtures/sample/src/index.ts | 32 - owl-bot-staging/v1/system-test/install.ts | 49 - .../gapic_video_intelligence_service_v1.ts | 259 ---- owl-bot-staging/v1/tsconfig.json | 19 - owl-bot-staging/v1/webpack.config.js | 64 - owl-bot-staging/v1beta2/.eslintignore | 7 - owl-bot-staging/v1beta2/.eslintrc.json | 3 - owl-bot-staging/v1beta2/.gitignore | 14 - owl-bot-staging/v1beta2/.jsdoc.js | 55 - owl-bot-staging/v1beta2/.mocharc.js | 33 - owl-bot-staging/v1beta2/.prettierrc.js | 22 - owl-bot-staging/v1beta2/README.md | 1 - .../v1beta2/linkinator.config.json | 16 - owl-bot-staging/v1beta2/package.json | 64 - .../v1beta2/video_intelligence.proto | 410 ------- ...oogle.cloud.videointelligence.v1beta2.json | 75 -- ...deo_intelligence_service.annotate_video.js | 99 -- owl-bot-staging/v1beta2/src/index.ts | 25 - .../v1beta2/src/v1beta2/gapic_metadata.json | 33 - owl-bot-staging/v1beta2/src/v1beta2/index.ts | 19 - .../video_intelligence_service_client.ts | 442 ------- ...eo_intelligence_service_client_config.json | 40 - ...video_intelligence_service_proto_list.json | 3 - .../system-test/fixtures/sample/src/index.js | 27 - .../system-test/fixtures/sample/src/index.ts | 32 - .../v1beta2/system-test/install.ts | 49 - ...apic_video_intelligence_service_v1beta2.ts | 259 ---- owl-bot-staging/v1beta2/tsconfig.json | 19 - owl-bot-staging/v1beta2/webpack.config.js | 64 - owl-bot-staging/v1p1beta1/.eslintignore | 7 - owl-bot-staging/v1p1beta1/.eslintrc.json | 3 - owl-bot-staging/v1p1beta1/.gitignore | 14 - owl-bot-staging/v1p1beta1/.jsdoc.js | 55 - owl-bot-staging/v1p1beta1/.mocharc.js | 33 - owl-bot-staging/v1p1beta1/.prettierrc.js | 22 - owl-bot-staging/v1p1beta1/README.md | 1 - .../v1p1beta1/linkinator.config.json | 
16 - owl-bot-staging/v1p1beta1/package.json | 64 - .../v1p1beta1/video_intelligence.proto | 450 ------- ...gle.cloud.videointelligence.v1p1beta1.json | 75 -- ...deo_intelligence_service.annotate_video.js | 99 -- owl-bot-staging/v1p1beta1/src/index.ts | 25 - .../src/v1p1beta1/gapic_metadata.json | 33 - .../v1p1beta1/src/v1p1beta1/index.ts | 19 - .../video_intelligence_service_client.ts | 442 ------- ...eo_intelligence_service_client_config.json | 40 - ...video_intelligence_service_proto_list.json | 3 - .../system-test/fixtures/sample/src/index.js | 27 - .../system-test/fixtures/sample/src/index.ts | 32 - .../v1p1beta1/system-test/install.ts | 49 - ...ic_video_intelligence_service_v1p1beta1.ts | 259 ---- owl-bot-staging/v1p1beta1/tsconfig.json | 19 - owl-bot-staging/v1p1beta1/webpack.config.js | 64 - owl-bot-staging/v1p2beta1/.eslintignore | 7 - owl-bot-staging/v1p2beta1/.eslintrc.json | 3 - owl-bot-staging/v1p2beta1/.gitignore | 14 - owl-bot-staging/v1p2beta1/.jsdoc.js | 55 - owl-bot-staging/v1p2beta1/.mocharc.js | 33 - owl-bot-staging/v1p2beta1/.prettierrc.js | 22 - owl-bot-staging/v1p2beta1/README.md | 1 - .../v1p2beta1/linkinator.config.json | 16 - owl-bot-staging/v1p2beta1/package.json | 64 - .../v1p2beta1/video_intelligence.proto | 489 -------- ...gle.cloud.videointelligence.v1p2beta1.json | 75 -- ...deo_intelligence_service.annotate_video.js | 97 -- owl-bot-staging/v1p2beta1/src/index.ts | 25 - .../src/v1p2beta1/gapic_metadata.json | 33 - .../v1p2beta1/src/v1p2beta1/index.ts | 19 - .../video_intelligence_service_client.ts | 440 ------- ...eo_intelligence_service_client_config.json | 40 - ...video_intelligence_service_proto_list.json | 3 - .../system-test/fixtures/sample/src/index.js | 27 - .../system-test/fixtures/sample/src/index.ts | 32 - .../v1p2beta1/system-test/install.ts | 49 - ...ic_video_intelligence_service_v1p2beta1.ts | 259 ---- owl-bot-staging/v1p2beta1/tsconfig.json | 19 - owl-bot-staging/v1p2beta1/webpack.config.js | 64 - 
owl-bot-staging/v1p3beta1/.eslintignore | 7 - owl-bot-staging/v1p3beta1/.eslintrc.json | 3 - owl-bot-staging/v1p3beta1/.gitignore | 14 - owl-bot-staging/v1p3beta1/.jsdoc.js | 55 - owl-bot-staging/v1p3beta1/.mocharc.js | 33 - owl-bot-staging/v1p3beta1/.prettierrc.js | 22 - owl-bot-staging/v1p3beta1/README.md | 1 - .../v1p3beta1/linkinator.config.json | 16 - owl-bot-staging/v1p3beta1/package.json | 65 - .../v1p3beta1/video_intelligence.proto | 1090 ----------------- ...gle.cloud.videointelligence.v1p3beta1.json | 119 -- ...igence_service.streaming_annotate_video.js | 73 -- ...deo_intelligence_service.annotate_video.js | 100 -- owl-bot-staging/v1p3beta1/src/index.ts | 27 - .../src/v1p3beta1/gapic_metadata.json | 51 - .../v1p3beta1/src/v1p3beta1/index.ts | 20 - ...aming_video_intelligence_service_client.ts | 331 ----- ...eo_intelligence_service_client_config.json | 31 - ...video_intelligence_service_proto_list.json | 3 - .../video_intelligence_service_client.ts | 443 ------- ...eo_intelligence_service_client_config.json | 40 - ...video_intelligence_service_proto_list.json | 3 - .../system-test/fixtures/sample/src/index.js | 28 - .../system-test/fixtures/sample/src/index.ts | 38 - .../v1p3beta1/system-test/install.ts | 49 - ...ng_video_intelligence_service_v1p3beta1.ts | 195 --- ...ic_video_intelligence_service_v1p3beta1.ts | 259 ---- owl-bot-staging/v1p3beta1/tsconfig.json | 19 - owl-bot-staging/v1p3beta1/webpack.config.js | 64 - src/v1/video_intelligence_service_client.ts | 59 +- .../video_intelligence_service_client.ts | 61 +- .../video_intelligence_service_client.ts | 61 +- .../video_intelligence_service_client.ts | 61 +- ...aming_video_intelligence_service_client.ts | 11 +- .../video_intelligence_service_client.ts | 61 +- 131 files changed, 248 insertions(+), 11122 deletions(-) delete mode 100644 owl-bot-staging/v1/.eslintignore delete mode 100644 owl-bot-staging/v1/.eslintrc.json delete mode 100644 owl-bot-staging/v1/.gitignore delete mode 100644 
owl-bot-staging/v1/.jsdoc.js delete mode 100644 owl-bot-staging/v1/.mocharc.js delete mode 100644 owl-bot-staging/v1/.prettierrc.js delete mode 100644 owl-bot-staging/v1/README.md delete mode 100644 owl-bot-staging/v1/linkinator.config.json delete mode 100644 owl-bot-staging/v1/package.json delete mode 100644 owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto delete mode 100644 owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json delete mode 100644 owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js delete mode 100644 owl-bot-staging/v1/src/index.ts delete mode 100644 owl-bot-staging/v1/src/v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/src/v1/index.ts delete mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1/system-test/install.ts delete mode 100644 owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts delete mode 100644 owl-bot-staging/v1/tsconfig.json delete mode 100644 owl-bot-staging/v1/webpack.config.js delete mode 100644 owl-bot-staging/v1beta2/.eslintignore delete mode 100644 owl-bot-staging/v1beta2/.eslintrc.json delete mode 100644 owl-bot-staging/v1beta2/.gitignore delete mode 100644 owl-bot-staging/v1beta2/.jsdoc.js delete mode 100644 owl-bot-staging/v1beta2/.mocharc.js delete mode 100644 owl-bot-staging/v1beta2/.prettierrc.js delete mode 100644 owl-bot-staging/v1beta2/README.md delete mode 100644 owl-bot-staging/v1beta2/linkinator.config.json delete mode 100644 owl-bot-staging/v1beta2/package.json delete mode 100644 
owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto delete mode 100644 owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json delete mode 100644 owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js delete mode 100644 owl-bot-staging/v1beta2/src/index.ts delete mode 100644 owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta2/src/v1beta2/index.ts delete mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1beta2/system-test/install.ts delete mode 100644 owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts delete mode 100644 owl-bot-staging/v1beta2/tsconfig.json delete mode 100644 owl-bot-staging/v1beta2/webpack.config.js delete mode 100644 owl-bot-staging/v1p1beta1/.eslintignore delete mode 100644 owl-bot-staging/v1p1beta1/.eslintrc.json delete mode 100644 owl-bot-staging/v1p1beta1/.gitignore delete mode 100644 owl-bot-staging/v1p1beta1/.jsdoc.js delete mode 100644 owl-bot-staging/v1p1beta1/.mocharc.js delete mode 100644 owl-bot-staging/v1p1beta1/.prettierrc.js delete mode 100644 owl-bot-staging/v1p1beta1/README.md delete mode 100644 owl-bot-staging/v1p1beta1/linkinator.config.json delete mode 100644 owl-bot-staging/v1p1beta1/package.json delete mode 100644 owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto delete mode 100644 
owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json delete mode 100644 owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js delete mode 100644 owl-bot-staging/v1p1beta1/src/index.ts delete mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts delete mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1p1beta1/system-test/install.ts delete mode 100644 owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts delete mode 100644 owl-bot-staging/v1p1beta1/tsconfig.json delete mode 100644 owl-bot-staging/v1p1beta1/webpack.config.js delete mode 100644 owl-bot-staging/v1p2beta1/.eslintignore delete mode 100644 owl-bot-staging/v1p2beta1/.eslintrc.json delete mode 100644 owl-bot-staging/v1p2beta1/.gitignore delete mode 100644 owl-bot-staging/v1p2beta1/.jsdoc.js delete mode 100644 owl-bot-staging/v1p2beta1/.mocharc.js delete mode 100644 owl-bot-staging/v1p2beta1/.prettierrc.js delete mode 100644 owl-bot-staging/v1p2beta1/README.md delete mode 100644 owl-bot-staging/v1p2beta1/linkinator.config.json delete mode 100644 owl-bot-staging/v1p2beta1/package.json delete mode 100644 owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto delete mode 100644 owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json delete mode 
100644 owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js delete mode 100644 owl-bot-staging/v1p2beta1/src/index.ts delete mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts delete mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1p2beta1/system-test/install.ts delete mode 100644 owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts delete mode 100644 owl-bot-staging/v1p2beta1/tsconfig.json delete mode 100644 owl-bot-staging/v1p2beta1/webpack.config.js delete mode 100644 owl-bot-staging/v1p3beta1/.eslintignore delete mode 100644 owl-bot-staging/v1p3beta1/.eslintrc.json delete mode 100644 owl-bot-staging/v1p3beta1/.gitignore delete mode 100644 owl-bot-staging/v1p3beta1/.jsdoc.js delete mode 100644 owl-bot-staging/v1p3beta1/.mocharc.js delete mode 100644 owl-bot-staging/v1p3beta1/.prettierrc.js delete mode 100644 owl-bot-staging/v1p3beta1/README.md delete mode 100644 owl-bot-staging/v1p3beta1/linkinator.config.json delete mode 100644 owl-bot-staging/v1p3beta1/package.json delete mode 100644 owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto delete mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json delete mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js delete 
mode 100644 owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js delete mode 100644 owl-bot-staging/v1p3beta1/src/index.ts delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json delete mode 100644 owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json delete mode 100644 owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js delete mode 100644 owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts delete mode 100644 owl-bot-staging/v1p3beta1/system-test/install.ts delete mode 100644 owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts delete mode 100644 owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts delete mode 100644 owl-bot-staging/v1p3beta1/tsconfig.json delete mode 100644 owl-bot-staging/v1p3beta1/webpack.config.js diff --git a/owl-bot-staging/v1/.eslintignore b/owl-bot-staging/v1/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1/.eslintrc.json b/owl-bot-staging/v1/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1/.eslintrc.json +++ /dev/null @@ -1,3 
+0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1/.gitignore b/owl-bot-staging/v1/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1/.jsdoc.js b/owl-bot-staging/v1/.jsdoc.js deleted file mode 100644 index 6c816e68..00000000 --- a/owl-bot-staging/v1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/video-intelligence', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1/.mocharc.js b/owl-bot-staging/v1/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1/.prettierrc.js b/owl-bot-staging/v1/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1/README.md b/owl-bot-staging/v1/README.md deleted file mode 100644 index d1c53e8c..00000000 --- a/owl-bot-staging/v1/README.md +++ /dev/null @@ -1 +0,0 @@ -Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1/linkinator.config.json b/owl-bot-staging/v1/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1/package.json b/owl-bot-staging/v1/package.json deleted file mode 100644 index 6b17fa2c..00000000 --- a/owl-bot-staging/v1/package.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "@google-cloud/video-intelligence", - "version": "0.1.0", - "description": "Videointelligence client for Node.js", - "repository": "googleapis/nodejs-videointelligence", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google videointelligence", - "videointelligence", - "video intelligence service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.1.1" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto b/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto deleted file mode 100644 index 648ec475..00000000 --- a/owl-bot-staging/v1/protos/google/cloud/videointelligence/v1/video_intelligence.proto +++ /dev/null @@ -1,906 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.videointelligence.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.VideoIntelligence.V1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence"; -option java_multiple_files = true; -option java_outer_classname = "VideoIntelligenceServiceProto"; -option java_package = "com.google.cloud.videointelligence.v1"; -option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1"; -option ruby_package = "Google::Cloud::VideoIntelligence::V1"; - -// Service that implements the Video Intelligence API. -service VideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Performs asynchronous video annotation. Progress and results can be - // retrieved through the `google.longrunning.Operations` interface. - // `Operation.metadata` contains `AnnotateVideoProgress` (progress). - // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/videos:annotate" - body: "*" - }; - option (google.api.method_signature) = "input_uri,features"; - option (google.longrunning.operation_info) = { - response_type: "AnnotateVideoResponse" - metadata_type: "AnnotateVideoProgress" - }; - } -} - -// Video annotation request. -message AnnotateVideoRequest { - // Input video location. Currently, only - // [Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported. 
URIs must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify - // multiple videos, a video URI may include wildcards in the `object-id`. - // Supported wildcards: '*' to match 0 or more characters; - // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` must be unset. - string input_uri = 1; - - // The video data bytes. - // If unset, the input video(s) should be specified via the `input_uri`. - // If set, `input_uri` must be unset. - bytes input_content = 6; - - // Required. Requested video annotation features. - repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional video context and/or feature-specific parameters. - VideoContext video_context = 3; - - // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported. These must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). - string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Cloud region where annotation should take place. Supported cloud - // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - // region is specified, the region will be determined based on video file - // location. - string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Video context and/or feature-specific parameters. -message VideoContext { - // Video segments to annotate. 
The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video is - // treated as a single segment. - repeated VideoSegment segments = 1; - - // Config for LABEL_DETECTION. - LabelDetectionConfig label_detection_config = 2; - - // Config for SHOT_CHANGE_DETECTION. - ShotChangeDetectionConfig shot_change_detection_config = 3; - - // Config for EXPLICIT_CONTENT_DETECTION. - ExplicitContentDetectionConfig explicit_content_detection_config = 4; - - // Config for FACE_DETECTION. - FaceDetectionConfig face_detection_config = 5; - - // Config for SPEECH_TRANSCRIPTION. - SpeechTranscriptionConfig speech_transcription_config = 6; - - // Config for TEXT_DETECTION. - TextDetectionConfig text_detection_config = 8; - - // Config for PERSON_DETECTION. - PersonDetectionConfig person_detection_config = 11; - - // Config for OBJECT_TRACKING. - ObjectTrackingConfig object_tracking_config = 13; -} - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Human face detection. - FACE_DETECTION = 4; - - // Speech transcription. - SPEECH_TRANSCRIPTION = 6; - - // OCR text detection and tracking. - TEXT_DETECTION = 7; - - // Object detection and tracking. - OBJECT_TRACKING = 9; - - // Logo detection, tracking, and recognition. - LOGO_RECOGNITION = 12; - - // Person detection. - PERSON_DETECTION = 14; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. - FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. 
-enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. - VERY_LIKELY = 5; -} - -// Config for LABEL_DETECTION. -message LabelDetectionConfig { - // What labels should be detected with LABEL_DETECTION, in addition to - // video-level labels or segment-level labels. - // If unspecified, defaults to `SHOT_MODE`. - LabelDetectionMode label_detection_mode = 1; - - // Whether the video has been shot from a stationary (i.e., non-moving) - // camera. When set to true, might improve detection accuracy for moving - // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. - bool stationary_camera = 2; - - // Model to use for label detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 3; - - // The confidence threshold we perform filtering on the labels from - // frame-level detection. If not set, it is set to 0.4 by default. The valid - // range for this threshold is [0.1, 0.9]. Any value set outside of this - // range will be clipped. - // Note: For best results, follow the default threshold. We will update - // the default threshold everytime when we release a new model. - float frame_confidence_threshold = 4; - - // The confidence threshold we perform filtering on the labels from - // video-level and shot-level detections. If not set, it's set to 0.3 by - // default. The valid range for this threshold is [0.1, 0.9]. Any value set - // outside of this range will be clipped. - // Note: For best results, follow the default threshold. We will update - // the default threshold everytime when we release a new model. - float video_confidence_threshold = 5; -} - -// Config for SHOT_CHANGE_DETECTION. -message ShotChangeDetectionConfig { - // Model to use for shot change detection. 
- // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for OBJECT_TRACKING. -message ObjectTrackingConfig { - // Model to use for object tracking. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for FACE_DETECTION. -message FaceDetectionConfig { - // Model to use for face detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; - - // Whether bounding boxes are included in the face annotation output. - bool include_bounding_boxes = 2; - - // Whether to enable face attributes detection, such as glasses, dark_glasses, - // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. - bool include_attributes = 5; -} - -// Config for PERSON_DETECTION. -message PersonDetectionConfig { - // Whether bounding boxes are included in the person detection annotation - // output. - bool include_bounding_boxes = 1; - - // Whether to enable pose landmarks detection. Ignored if - // 'include_bounding_boxes' is set to false. - bool include_pose_landmarks = 2; - - // Whether to enable person attributes detection, such as cloth color (black, - // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, - // etc. - // Ignored if 'include_bounding_boxes' is set to false. - bool include_attributes = 3; -} - -// Config for EXPLICIT_CONTENT_DETECTION. -message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for TEXT_DETECTION. -message TextDetectionConfig { - // Language hint can be specified if the language to be detected is known a - // priori. It can increase the accuracy of the detection. Language hint must - // be language code in BCP-47 format. 
- // - // Automatic language detection is performed if no hint is provided. - repeated string language_hints = 1; - - // Model to use for text detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 2; -} - -// Video segment. -message VideoSegment { - // Time-offset, relative to the beginning of the video, - // corresponding to the start of the segment (inclusive). - google.protobuf.Duration start_time_offset = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the end of the segment (inclusive). - google.protobuf.Duration end_time_offset = 2; -} - -// Video segment level annotation results for label detection. -message LabelSegment { - // Video segment where a label was detected. - VideoSegment segment = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Video frame level annotation results for label detection. -message LabelFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Detected entity from video analysis. -message Entity { - // Opaque entity ID. Some IDs may be available in - // [Google Knowledge Graph Search - // API](https://developers.google.com/knowledge-graph/). - string entity_id = 1; - - // Textual description, e.g., `Fixed-gear bicycle`. - string description = 2; - - // Language code for `description` in BCP-47 format. - string language_code = 3; -} - -// Label annotation. -message LabelAnnotation { - // Detected entity. - Entity entity = 1; - - // Common categories for the detected entity. - // For example, when the label is `Terrier`, the category is likely `dog`. And - // in some cases there might be more than one categories e.g., `Terrier` could - // also be a `pet`. 
- repeated Entity category_entities = 2; - - // All video segments where a label was detected. - repeated LabelSegment segments = 3; - - // All video frames where a label was detected. - repeated LabelFrame frames = 4; - - // Feature version. - string version = 5; -} - -// Video frame level annotation results for explicit content. -message ExplicitContentFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Likelihood of the pornography content.. - Likelihood pornography_likelihood = 2; -} - -// Explicit content annotation (based on per-frame visual signals only). -// If no explicit content has been detected in a frame, no annotations are -// present for that frame. -message ExplicitContentAnnotation { - // All video frames where explicit content was detected. - repeated ExplicitContentFrame frames = 1; - - // Feature version. - string version = 2; -} - -// Normalized bounding box. -// The normalized vertex coordinates are relative to the original image. -// Range: [0, 1]. -message NormalizedBoundingBox { - // Left X coordinate. - float left = 1; - - // Top Y coordinate. - float top = 2; - - // Right X coordinate. - float right = 3; - - // Bottom Y coordinate. - float bottom = 4; -} - -// Face detection annotation. -message FaceDetectionAnnotation { - // The face tracks with attributes. - repeated Track tracks = 3; - - // The thumbnail of a person's face. - bytes thumbnail = 4; - - // Feature version. - string version = 5; -} - -// Person detection annotation per video. -message PersonDetectionAnnotation { - // The detected tracks of a person. - repeated Track tracks = 1; - - // Feature version. - string version = 2; -} - -// Video segment level annotation results for face detection. -message FaceSegment { - // Video segment where a face was detected. - VideoSegment segment = 1; -} - -// Deprecated. No effect. 
-message FaceFrame { - option deprecated = true; - - // Normalized Bounding boxes in a frame. - // There can be more than one boxes if the same face is detected in multiple - // locations within the current frame. - repeated NormalizedBoundingBox normalized_bounding_boxes = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the video frame for this location. - google.protobuf.Duration time_offset = 2; -} - -// Deprecated. No effect. -message FaceAnnotation { - option deprecated = true; - - // Thumbnail of a representative face view (in JPEG format). - bytes thumbnail = 1; - - // All video segments where a face was detected. - repeated FaceSegment segments = 2; - - // All video frames where a face was detected. - repeated FaceFrame frames = 3; -} - -// For tracking related features. -// An object at time_offset with attributes, and located with -// normalized_bounding_box. -message TimestampedObject { - // Normalized Bounding box in a frame, where the object is located. - NormalizedBoundingBox normalized_bounding_box = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the video frame for this object. - google.protobuf.Duration time_offset = 2; - - // Optional. The attributes of the object in the bounding box. - repeated DetectedAttribute attributes = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The detected landmarks. - repeated DetectedLandmark landmarks = 4 - [(google.api.field_behavior) = OPTIONAL]; -} - -// A track of an object instance. -message Track { - // Video segment of a track. - VideoSegment segment = 1; - - // The object with timestamp and attributes per frame in the track. - repeated TimestampedObject timestamped_objects = 2; - - // Optional. Attributes in the track level. - repeated DetectedAttribute attributes = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The confidence score of the tracked object. 
- float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A generic detected attribute represented by name in string format. -message DetectedAttribute { - // The name of the attribute, for example, glasses, dark_glasses, mouth_open. - // A full list of supported type names will be provided in the document. - string name = 1; - - // Detected attribute confidence. Range [0, 1]. - float confidence = 2; - - // Text value of the detection result. For example, the value for "HairColor" - // can be "black", "blonde", etc. - string value = 3; -} - -// A generic detected landmark represented by name in string format and a 2D -// location. -message DetectedLandmark { - // The name of this landmark, for example, left_hand, right_shoulder. - string name = 1; - - // The 2D point of the detected landmark using the normalized image - // coordindate system. The normalized coordinates have the range from 0 to 1. - NormalizedVertex point = 2; - - // The confidence score of the detected landmark. Range [0, 1]. - float confidence = 3; -} - -// Annotation results for a single video. -message VideoAnnotationResults { - // Video file location in - // [Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Video segment on which the annotation is run. - VideoSegment segment = 10; - - // Topical label annotations on video level or user-specified segment level. - // There is exactly one element for each unique label. - repeated LabelAnnotation segment_label_annotations = 2; - - // Presence label annotations on video level or user-specified segment level. - // There is exactly one element for each unique label. Compared to the - // existing topical `segment_label_annotations`, this field presents more - // fine-grained, segment-level labels detected in video content and is made - // available only when the client sets `LabelDetectionConfig.model` to - // "builtin/latest" in the request. 
- repeated LabelAnnotation segment_presence_label_annotations = 23; - - // Topical label annotations on shot level. - // There is exactly one element for each unique label. - repeated LabelAnnotation shot_label_annotations = 3; - - // Presence label annotations on shot level. There is exactly one element for - // each unique label. Compared to the existing topical - // `shot_label_annotations`, this field presents more fine-grained, shot-level - // labels detected in video content and is made available only when the client - // sets `LabelDetectionConfig.model` to "builtin/latest" in the request. - repeated LabelAnnotation shot_presence_label_annotations = 24; - - // Label annotations on frame level. - // There is exactly one element for each unique label. - repeated LabelAnnotation frame_label_annotations = 4; - - // Deprecated. Please use `face_detection_annotations` instead. - repeated FaceAnnotation face_annotations = 5 [deprecated = true]; - - // Face detection annotations. - repeated FaceDetectionAnnotation face_detection_annotations = 13; - - // Shot annotations. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 6; - - // Explicit content annotation. - ExplicitContentAnnotation explicit_annotation = 7; - - // Speech transcription. - repeated SpeechTranscription speech_transcriptions = 11; - - // OCR text detection and tracking. - // Annotations for list of detected text snippets. Each will have list of - // frame information associated with it. - repeated TextAnnotation text_annotations = 12; - - // Annotations for list of objects detected and tracked in video. - repeated ObjectTrackingAnnotation object_annotations = 14; - - // Annotations for list of logos detected, tracked and recognized in video. - repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; - - // Person detection annotations. - repeated PersonDetectionAnnotation person_detection_annotations = 20; - - // If set, indicates an error. 
Note that for a single `AnnotateVideoRequest` - // some videos may succeed and some may fail. - google.rpc.Status error = 9; -} - -// Video annotation response. Included in the `response` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoResponse { - // Annotation results for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationResults annotation_results = 1; -} - -// Annotation progress for a single video. -message VideoAnnotationProgress { - // Video file location in - // [Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Approximate percentage processed thus far. Guaranteed to be - // 100 when fully processed. - int32 progress_percent = 2; - - // Time when the request was received. - google.protobuf.Timestamp start_time = 3; - - // Time of the most recent update. - google.protobuf.Timestamp update_time = 4; - - // Specifies which feature is being tracked if the request contains more than - // one feature. - Feature feature = 5; - - // Specifies which segment is being tracked if the request contains more than - // one segment. - VideoSegment segment = 6; -} - -// Video annotation progress. Included in the `metadata` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoProgress { - // Progress metadata for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationProgress annotation_progress = 1; -} - -// Config for SPEECH_TRANSCRIPTION. -message SpeechTranscriptionConfig { - // Required. *Required* The language of the supplied audio as a - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - // Example: "en-US". - // See [Language Support](https://cloud.google.com/speech/docs/languages) - // for a list of the currently supported language codes. 
- string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Maximum number of recognition hypotheses to be returned. - // Specifically, the maximum number of `SpeechRecognitionAlternative` messages - // within each `SpeechTranscription`. The server may return fewer than - // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will - // return a maximum of one. If omitted, will return a maximum of one. - int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set to `true`, the server will attempt to filter out - // profanities, replacing all but the initial character in each filtered word - // with asterisks, e.g. "f***". If set to `false` or omitted, profanities - // won't be filtered out. - bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If 'true', adds punctuation to recognition result hypotheses. - // This feature is only available in select languages. Setting this for - // requests in other languages has no effect at all. The default 'false' value - // does not add punctuation to result hypotheses. NOTE: "This is currently - // offered as an experimental service, complimentary to all users. In the - // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. For file formats, such as MXF or MKV, supporting multiple audio - // tracks, specify up to two tracks. Default: track 0. - repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If 'true', enables speaker detection for each recognized word in - // the top alternative of the recognition result using a speaker_tag provided - // in the WordInfo. 
- // Note: When this is true, we send all the words from the beginning of the - // audio for the top alternative in every consecutive response. - // This is done in order to improve our speaker tags as our models learn to - // identify the speakers in the conversation over time. - bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set, specifies the estimated number of speakers in the - // conversation. If not set, defaults to '2'. Ignored unless - // enable_speaker_diarization is set to true. - int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If `true`, the top result includes a list of words and the - // confidence for those words. If `false`, no word-level confidence - // information is returned. The default is `false`. - bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; -} - -// Provides "hints" to the speech recognizer to favor specific words and phrases -// in the results. -message SpeechContext { - // Optional. A list of strings containing words and phrases "hints" so that - // the speech recognition is more likely to recognize them. This can be used - // to improve the accuracy for specific words and phrases, for example, if - // specific commands are typically spoken by the user. This can also be used - // to add additional words to the vocabulary of the recognizer. See - // [usage limits](https://cloud.google.com/speech/limits#content). - repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// A speech recognition result corresponding to a portion of the audio. -message SpeechTranscription { - // May contain one or more recognition hypotheses (up to the maximum specified - // in `max_alternatives`). These alternatives are ordered in terms of - // accuracy, with the top (first) alternative being the most probable, as - // ranked by the recognizer. 
- repeated SpeechRecognitionAlternative alternatives = 1; - - // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) - // language tag of the language in this result. This language code was - // detected to have the most likelihood of being spoken in the audio. - string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Alternative hypotheses (a.k.a. n-best list). -message SpeechRecognitionAlternative { - // Transcript text representing the words that the user spoke. - string transcript = 1; - - // Output only. The confidence estimate between 0.0 and 1.0. A higher number - // indicates an estimated greater likelihood that the recognized words are - // correct. This field is set only for the top alternative. - // This field is not guaranteed to be accurate and users should not rely on it - // to be always provided. - // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A list of word-specific information for each recognized word. - // Note: When `enable_speaker_diarization` is set to true, you will see all - // the words from the beginning of the audio. - repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Word-specific information for recognized words. Word information is only -// included in the response when certain request parameters are set, such -// as `enable_word_time_offsets`. -message WordInfo { - // Time offset relative to the beginning of the audio, and - // corresponding to the start of the spoken word. This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. - google.protobuf.Duration start_time = 1; - - // Time offset relative to the beginning of the audio, and - // corresponding to the end of the spoken word. 
This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. - google.protobuf.Duration end_time = 2; - - // The word corresponding to this set of information. - string word = 3; - - // Output only. The confidence estimate between 0.0 and 1.0. A higher number - // indicates an estimated greater likelihood that the recognized words are - // correct. This field is set only for the top alternative. - // This field is not guaranteed to be accurate and users should not rely on it - // to be always provided. - // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A distinct integer value is assigned for every speaker within - // the audio. This field specifies which one of those speakers was detected to - // have spoken this word. Value ranges from 1 up to diarization_speaker_count, - // and is only set if speaker diarization is enabled. - int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A vertex represents a 2D point in the image. -// NOTE: the normalized vertex coordinates are relative to the original image -// and range from 0 to 1. -message NormalizedVertex { - // X coordinate. - float x = 1; - - // Y coordinate. - float y = 2; -} - -// Normalized bounding polygon for text (that might not be aligned with axis). -// Contains list of the corner points in clockwise order starting from -// top-left corner. For example, for a rectangular bounding box: -// When the text is horizontal it might look like: -// 0----1 -// | | -// 3----2 -// -// When it's clockwise rotated 180 degrees around the top-left corner it -// becomes: -// 2----3 -// | | -// 1----0 -// -// and the vertex order will still be (0, 1, 2, 3). 
Note that values can be less -// than 0, or greater than 1 due to trignometric calculations for location of -// the box. -message NormalizedBoundingPoly { - // Normalized vertices of the bounding polygon. - repeated NormalizedVertex vertices = 1; -} - -// Video segment level annotation results for text detection. -message TextSegment { - // Video segment where a text snippet was detected. - VideoSegment segment = 1; - - // Confidence for the track of detected text. It is calculated as the highest - // over all frames where OCR detected text appears. - float confidence = 2; - - // Information related to the frames where OCR detected text appears. - repeated TextFrame frames = 3; -} - -// Video frame level annotation results for text annotation (OCR). -// Contains information regarding timestamp and bounding box locations for the -// frames containing detected OCR text snippets. -message TextFrame { - // Bounding polygon of the detected text for this frame. - NormalizedBoundingPoly rotated_bounding_box = 1; - - // Timestamp of this frame. - google.protobuf.Duration time_offset = 2; -} - -// Annotations related to one detected OCR text snippet. This will contain the -// corresponding text, confidence value, and frame level information for each -// detection. -message TextAnnotation { - // The detected text. - string text = 1; - - // All video segments where OCR detected text appears. - repeated TextSegment segments = 2; - - // Feature version. - string version = 3; -} - -// Video frame level annotations for object detection and tracking. This field -// stores per frame location, time offset, and confidence. -message ObjectTrackingFrame { - // The normalized bounding box location of this object track for the frame. - NormalizedBoundingBox normalized_bounding_box = 1; - - // The timestamp of the frame in microseconds. - google.protobuf.Duration time_offset = 2; -} - -// Annotations corresponding to one tracked object. 
-message ObjectTrackingAnnotation { - // Different representation of tracking info in non-streaming batch - // and streaming modes. - oneof track_info { - // Non-streaming batch mode ONLY. - // Each object track corresponds to one video segment where it appears. - VideoSegment segment = 3; - - // Streaming mode ONLY. - // In streaming mode, we do not know the end time of a tracked object - // before it is completed. Hence, there is no VideoSegment info returned. - // Instead, we provide a unique identifiable integer track_id so that - // the customers can correlate the results of the ongoing - // ObjectTrackAnnotation of the same track_id over time. - int64 track_id = 5; - } - - // Entity to specify the object category that this track is labeled as. - Entity entity = 1; - - // Object category's labeling confidence of this track. - float confidence = 4; - - // Information corresponding to all frames where this object track appears. - // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame - // messages in frames. - // Streaming mode: it can only be one ObjectTrackingFrame message in frames. - repeated ObjectTrackingFrame frames = 2; - - // Feature version. - string version = 6; -} - -// Annotation corresponding to one detected, tracked and recognized logo class. -message LogoRecognitionAnnotation { - // Entity category information to specify the logo class that all the logo - // tracks within this LogoRecognitionAnnotation are recognized as. - Entity entity = 1; - - // All logo tracks where the recognized logo appears. Each track corresponds - // to one logo instance appearing in consecutive frames. - repeated Track tracks = 2; - - // All video segments where the recognized logo appears. There might be - // multiple instances of the same logo class appearing in one VideoSegment. 
- repeated VideoSegment segments = 3; -} diff --git a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json b/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json deleted file mode 100644 index ae482cdf..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/snippet_metadata.google.cloud.videointelligence.v1.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-videointelligence", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.videointelligence.v1", - "version": "v1" - } - ] - }, - "snippets": [ - { - "regionTag": "videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async", - "title": "videointelligence annotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", - "canonical": true, - "file": "video_intelligence_service.annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 92, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService.AnnotateVideo", - "async": true, - "parameters": [ - { - "name": "input_uri", - "type": "TYPE_STRING" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - }, - { - "name": "features", - "type": "TYPE_ENUM[]" - }, - { - "name": "video_context", - "type": ".google.cloud.videointelligence.v1.VideoContext" - }, - { - "name": "output_uri", - "type": "TYPE_STRING" - }, - { - "name": "location_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "VideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceServiceClient" - }, - "method": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService.AnnotateVideo", - "service": { - "shortName": "VideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1.VideoIntelligenceService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js deleted file mode 100644 index caac0742..00000000 --- a/owl-bot-staging/v1/samples/generated/v1/video_intelligence_service.annotate_video.js +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(features) { - // [START videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Input video location. Currently, only - * Cloud Storage (https://cloud.google.com/storage/) URIs are - * supported. URIs must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). To identify - * multiple videos, a video URI may include wildcards in the `object-id`. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` must be unset. - */ - // const inputUri = 'abc123' - /** - * The video data bytes. - * If unset, the input video(s) should be specified via the `input_uri`. - * If set, `input_uri` must be unset. - */ - // const inputContent = 'Buffer.from('string')' - /** - * Required. Requested video annotation features. - */ - // const features = 1234 - /** - * Additional video context and/or feature-specific parameters. 
- */ - // const videoContext = {} - /** - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only Cloud Storage (https://cloud.google.com/storage/) - * URIs are supported. These must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). - */ - // const outputUri = 'abc123' - /** - * Optional. Cloud region where annotation should take place. Supported cloud - * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - * region is specified, the region will be determined based on video file - * location. - */ - // const locationId = 'abc123' - - // Imports the Videointelligence library - const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1; - - // Instantiates a client - const videointelligenceClient = new VideoIntelligenceServiceClient(); - - async function callAnnotateVideo() { - // Construct request - const request = { - features, - }; - - // Run request - const [operation] = await videointelligenceClient.annotateVideo(request); - const [response] = await operation.promise(); - console.log(response); - } - - callAnnotateVideo(); - // [END videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1/src/index.ts b/owl-bot-staging/v1/src/index.ts deleted file mode 100644 index b7940547..00000000 --- a/owl-bot-staging/v1/src/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1 from './v1'; -const VideoIntelligenceServiceClient = v1.VideoIntelligenceServiceClient; -type VideoIntelligenceServiceClient = v1.VideoIntelligenceServiceClient; -export {v1, VideoIntelligenceServiceClient}; -export default {v1, VideoIntelligenceServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1/src/v1/gapic_metadata.json b/owl-bot-staging/v1/src/v1/gapic_metadata.json deleted file mode 100644 index daf4c173..00000000 --- a/owl-bot-staging/v1/src/v1/gapic_metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.videointelligence.v1", - "libraryPackage": "@google-cloud/video-intelligence", - "services": { - "VideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/src/v1/index.ts b/owl-bot-staging/v1/src/v1/index.ts deleted file mode 100644 index 6fcd1933..00000000 --- 
a/owl-bot-staging/v1/src/v1/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts b/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts deleted file mode 100644 index e7e287c7..00000000 --- a/owl-bot-staging/v1/src/v1/video_intelligence_service_client.ts +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1/video_intelligence_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './video_intelligence_service_client_config.json'; -import { operationsProtos } from 'google-gax'; -const version = require('../../../package.json').version; - -/** - * Service that implements the Video Intelligence API. - * @class - * @memberof v1 - */ -export class VideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of VideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). 
- * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], - },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.GetOperation',get: '/v1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.ListOperations',get: '/v1/{name=projects/*/locations/*}/operations',}]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const annotateVideoResponse = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1.AnnotateVideoResponse') as gax.protobuf.Type; - const annotateVideoMetadata = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1.AnnotateVideoProgress') as gax.protobuf.Type; - - this.descriptors.longrunning = { - annotateVideo: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - annotateVideoResponse.decode.bind(annotateVideoResponse), - annotateVideoMetadata.decode.bind(annotateVideoMetadata)) - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.videoIntelligenceServiceStub) { - return this.videoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1.VideoIntelligenceService. - this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1.VideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1.VideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const videoIntelligenceServiceStubMethods = - ['annotateVideo']; - for (const methodName of videoIntelligenceServiceStubMethods) { - const callPromise = this.videoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.videoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs asynchronous video annotation. Progress and results can be - * retrieved through the `google.longrunning.Operations` interface. - * `Operation.metadata` contains `AnnotateVideoProgress` (progress). - * `Operation.response` contains `AnnotateVideoResponse` (results). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.inputUri - * Input video location. Currently, only - * [Cloud Storage](https://cloud.google.com/storage/) URIs are - * supported. URIs must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify - * multiple videos, a video URI may include wildcards in the `object-id`. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` must be unset. - * @param {Buffer} request.inputContent - * The video data bytes. - * If unset, the input video(s) should be specified via the `input_uri`. - * If set, `input_uri` must be unset. - * @param {number[]} request.features - * Required. Requested video annotation features. - * @param {google.cloud.videointelligence.v1.VideoContext} request.videoContext - * Additional video context and/or feature-specific parameters. - * @param {string} [request.outputUri] - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only [Cloud Storage](https://cloud.google.com/storage/) - * URIs are supported. 
These must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). - * @param {string} [request.locationId] - * Optional. Cloud region where annotation should take place. Supported cloud - * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - * region is specified, the region will be determined based on video file - * location. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - annotateVideo( - request?: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - annotateVideo( - request: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request?: protos.google.cloud.videointelligence.v1.IAnnotateVideoRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.annotateVideo(request, options, callback); - } -/** - * Check the status of the long running operation returned by `annotateVideo()`. - * @param {String} name - * The operation name that will be passed. - * @returns {Promise} - The promise which resolves to an object. 
- * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - async checkAnnotateVideoProgress(name: string): Promise>{ - const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.videoIntelligenceServiceStub && !this._terminated) { - return this.videoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json b/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json deleted file mode 100644 index 49091879..00000000 --- a/owl-bot-staging/v1/src/v1/video_intelligence_service_client_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1.VideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - }, - "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.5, - "max_retry_delay_millis": 120000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "AnnotateVideo": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" - } - } - } - } -} diff --git a/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json deleted file mode 100644 index e28d401f..00000000 --- a/owl-bot-staging/v1/src/v1/video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - "../../protos/google/cloud/videointelligence/v1/video_intelligence.proto" -] diff --git 
a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index 85a71c33..00000000 --- a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const videointelligence = require('@google-cloud/video-intelligence'); - -function main() { - const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); -} - -main(); diff --git a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index d466c7b0..00000000 --- a/owl-bot-staging/v1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; - -// check that the client class type name can be used -function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); - doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); -} - -main(); diff --git a/owl-bot-staging/v1/system-test/install.ts b/owl-bot-staging/v1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts b/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts deleted file mode 100644 index a62a0554..00000000 --- a/owl-bot-staging/v1/test/gapic_video_intelligence_service_v1.ts +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as videointelligenceserviceModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1.VideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = videointelligenceserviceModule.v1.VideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.videoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.videoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 
'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('annotateVideo', () => { - it('invokes annotateVideo without error', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); - const [operation] = await 
client.annotateVideo(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo without error using callback', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.annotateVideo( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes annotateVideo with call error', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); - await 
assert.rejects(client.annotateVideo(request), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo with LRO error', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.annotateVideo(request); - await assert.rejects(operation.promise(), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes checkAnnotateVideoProgress without error', async () => { - const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkAnnotateVideoProgress with error', async () => { - 
const client = new videointelligenceserviceModule.v1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); -}); diff --git a/owl-bot-staging/v1/tsconfig.json b/owl-bot-staging/v1/tsconfig.json deleted file mode 100644 index c78f1c88..00000000 --- a/owl-bot-staging/v1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/v1/webpack.config.js b/owl-bot-staging/v1/webpack.config.js deleted file mode 100644 index 9657601b..00000000 --- a/owl-bot-staging/v1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'videointelligence', - filename: './videointelligence.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/owl-bot-staging/v1beta2/.eslintignore b/owl-bot-staging/v1beta2/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1beta2/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1beta2/.eslintrc.json b/owl-bot-staging/v1beta2/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1beta2/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1beta2/.gitignore b/owl-bot-staging/v1beta2/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1beta2/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1beta2/.jsdoc.js b/owl-bot-staging/v1beta2/.jsdoc.js deleted file mode 100644 index 6c816e68..00000000 --- a/owl-bot-staging/v1beta2/.jsdoc.js +++ 
/dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/video-intelligence', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1beta2/.mocharc.js b/owl-bot-staging/v1beta2/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1beta2/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1beta2/.prettierrc.js b/owl-bot-staging/v1beta2/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1beta2/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1beta2/README.md b/owl-bot-staging/v1beta2/README.md deleted file mode 100644 index d1c53e8c..00000000 --- a/owl-bot-staging/v1beta2/README.md +++ /dev/null @@ -1 +0,0 @@ -Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1beta2/linkinator.config.json b/owl-bot-staging/v1beta2/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1beta2/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1beta2/package.json b/owl-bot-staging/v1beta2/package.json deleted file mode 100644 index 6b17fa2c..00000000 --- a/owl-bot-staging/v1beta2/package.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "@google-cloud/video-intelligence", - "version": "0.1.0", - "description": "Videointelligence client for Node.js", - "repository": "googleapis/nodejs-videointelligence", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google videointelligence", - "videointelligence", - "video intelligence service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.1.1" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto b/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto deleted file mode 100644 index 81648c52..00000000 --- a/owl-bot-staging/v1beta2/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.cloud.videointelligence.v1beta2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.VideoIntelligence.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence"; -option java_multiple_files = true; -option java_outer_classname = "VideoIntelligenceServiceProto"; -option java_package = "com.google.cloud.videointelligence.v1beta2"; -option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1beta2"; -option ruby_package = "Google::Cloud::VideoIntelligence::V1beta2"; - -// Service that implements Google Cloud Video Intelligence API. -service VideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Performs asynchronous video annotation. Progress and results can be - // retrieved through the `google.longrunning.Operations` interface. - // `Operation.metadata` contains `AnnotateVideoProgress` (progress). - // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta2/videos:annotate" - body: "*" - }; - option (google.api.method_signature) = "input_uri,features"; - option (google.longrunning.operation_info) = { - response_type: "AnnotateVideoResponse" - metadata_type: "AnnotateVideoProgress" - }; - } -} - -// Video annotation request. -message AnnotateVideoRequest { - // Input video location. 
Currently, only - // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI - // may include wildcards in `object-id`, and thus identify multiple videos. - // Supported wildcards: '*' to match 0 or more characters; - // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` should be unset. - string input_uri = 1; - - // The video data bytes. - // If unset, the input video(s) should be specified via `input_uri`. - // If set, `input_uri` should be unset. - bytes input_content = 6; - - // Required. Requested video annotation features. - repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional video context and/or feature-specific parameters. - VideoContext video_context = 3; - - // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). - string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Cloud region where annotation should take place. Supported cloud - // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - // is specified, a region will be determined based on video file location. 
- string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Video context and/or feature-specific parameters. -message VideoContext { - // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video is - // treated as a single segment. - repeated VideoSegment segments = 1; - - // Config for LABEL_DETECTION. - LabelDetectionConfig label_detection_config = 2; - - // Config for SHOT_CHANGE_DETECTION. - ShotChangeDetectionConfig shot_change_detection_config = 3; - - // Config for EXPLICIT_CONTENT_DETECTION. - ExplicitContentDetectionConfig explicit_content_detection_config = 4; - - // Config for FACE_DETECTION. - FaceDetectionConfig face_detection_config = 5; -} - -// Config for LABEL_DETECTION. -message LabelDetectionConfig { - // What labels should be detected with LABEL_DETECTION, in addition to - // video-level labels or segment-level labels. - // If unspecified, defaults to `SHOT_MODE`. - LabelDetectionMode label_detection_mode = 1; - - // Whether the video has been shot from a stationary (i.e. non-moving) camera. - // When set to true, might improve detection accuracy for moving objects. - // Should be used with `SHOT_AND_FRAME_MODE` enabled. - bool stationary_camera = 2; - - // Model to use for label detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 3; -} - -// Config for SHOT_CHANGE_DETECTION. -message ShotChangeDetectionConfig { - // Model to use for shot change detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for EXPLICIT_CONTENT_DETECTION. -message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for FACE_DETECTION. 
-message FaceDetectionConfig { - // Model to use for face detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; - - // Whether bounding boxes be included in the face annotation output. - bool include_bounding_boxes = 2; -} - -// Video segment. -message VideoSegment { - // Time-offset, relative to the beginning of the video, - // corresponding to the start of the segment (inclusive). - google.protobuf.Duration start_time_offset = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the end of the segment (inclusive). - google.protobuf.Duration end_time_offset = 2; -} - -// Video segment level annotation results for label detection. -message LabelSegment { - // Video segment where a label was detected. - VideoSegment segment = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Video frame level annotation results for label detection. -message LabelFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Detected entity from video analysis. -message Entity { - // Opaque entity ID. Some IDs may be available in - // [Google Knowledge Graph Search - // API](https://developers.google.com/knowledge-graph/). - string entity_id = 1; - - // Textual description, e.g. `Fixed-gear bicycle`. - string description = 2; - - // Language code for `description` in BCP-47 format. - string language_code = 3; -} - -// Label annotation. -message LabelAnnotation { - // Detected entity. - Entity entity = 1; - - // Common categories for the detected entity. - // E.g. when the label is `Terrier` the category is likely `dog`. And in some - // cases there might be more than one categories e.g. `Terrier` could also be - // a `pet`. 
- repeated Entity category_entities = 2; - - // All video segments where a label was detected. - repeated LabelSegment segments = 3; - - // All video frames where a label was detected. - repeated LabelFrame frames = 4; -} - -// Video frame level annotation results for explicit content. -message ExplicitContentFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Likelihood of the pornography content.. - Likelihood pornography_likelihood = 2; -} - -// Explicit content annotation (based on per-frame visual signals only). -// If no explicit content has been detected in a frame, no annotations are -// present for that frame. -message ExplicitContentAnnotation { - // All video frames where explicit content was detected. - repeated ExplicitContentFrame frames = 1; -} - -// Normalized bounding box. -// The normalized vertex coordinates are relative to the original image. -// Range: [0, 1]. -message NormalizedBoundingBox { - // Left X coordinate. - float left = 1; - - // Top Y coordinate. - float top = 2; - - // Right X coordinate. - float right = 3; - - // Bottom Y coordinate. - float bottom = 4; -} - -// Video segment level annotation results for face detection. -message FaceSegment { - // Video segment where a face was detected. - VideoSegment segment = 1; -} - -// Video frame level annotation results for face detection. -message FaceFrame { - // Normalized Bounding boxes in a frame. - // There can be more than one boxes if the same face is detected in multiple - // locations within the current frame. - repeated NormalizedBoundingBox normalized_bounding_boxes = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the video frame for this location. - google.protobuf.Duration time_offset = 2; -} - -// Face annotation. -message FaceAnnotation { - // Thumbnail of a representative face view (in JPEG format). 
- bytes thumbnail = 1; - - // All video segments where a face was detected. - repeated FaceSegment segments = 2; - - // All video frames where a face was detected. - repeated FaceFrame frames = 3; -} - -// Annotation results for a single video. -message VideoAnnotationResults { - // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Label annotations on video level or user specified segment level. - // There is exactly one element for each unique label. - repeated LabelAnnotation segment_label_annotations = 2; - - // Label annotations on shot level. - // There is exactly one element for each unique label. - repeated LabelAnnotation shot_label_annotations = 3; - - // Label annotations on frame level. - // There is exactly one element for each unique label. - repeated LabelAnnotation frame_label_annotations = 4; - - // Face annotations. There is exactly one element for each unique face. - repeated FaceAnnotation face_annotations = 5; - - // Shot annotations. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 6; - - // Explicit content annotation. - ExplicitContentAnnotation explicit_annotation = 7; - - // If set, indicates an error. Note that for a single `AnnotateVideoRequest` - // some videos may succeed and some may fail. - google.rpc.Status error = 9; -} - -// Video annotation response. Included in the `response` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoResponse { - // Annotation results for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationResults annotation_results = 1; -} - -// Annotation progress for a single video. -message VideoAnnotationProgress { - // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Approximate percentage processed thus far. 
- // Guaranteed to be 100 when fully processed. - int32 progress_percent = 2; - - // Time when the request was received. - google.protobuf.Timestamp start_time = 3; - - // Time of the most recent update. - google.protobuf.Timestamp update_time = 4; -} - -// Video annotation progress. Included in the `metadata` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoProgress { - // Progress metadata for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationProgress annotation_progress = 1; -} - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Human face detection and tracking. - FACE_DETECTION = 4; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. - FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. 
- VERY_LIKELY = 5; -} diff --git a/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json b/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json deleted file mode 100644 index f1f4b140..00000000 --- a/owl-bot-staging/v1beta2/samples/generated/v1beta2/snippet_metadata.google.cloud.videointelligence.v1beta2.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-videointelligence", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.videointelligence.v1beta2", - "version": "v1beta2" - } - ] - }, - "snippets": [ - { - "regionTag": "videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async", - "title": "videointelligence annotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", - "canonical": true, - "file": "video_intelligence_service.annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 91, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService.AnnotateVideo", - "async": true, - "parameters": [ - { - "name": "input_uri", - "type": "TYPE_STRING" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - }, - { - "name": "features", - "type": "TYPE_ENUM[]" - }, - { - "name": "video_context", - "type": ".google.cloud.videointelligence.v1beta2.VideoContext" - }, - { - "name": "output_uri", - "type": "TYPE_STRING" - }, - { - "name": "location_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "VideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceServiceClient" - }, - "method": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService.AnnotateVideo", - "service": { - "shortName": "VideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1beta2.VideoIntelligenceService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js deleted file mode 100644 index 6c39d8b5..00000000 --- a/owl-bot-staging/v1beta2/samples/generated/v1beta2/video_intelligence_service.annotate_video.js +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(features) { - // [START videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Input video location. Currently, only - * Google Cloud Storage (https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). A video URI - * may include wildcards in `object-id`, and thus identify multiple videos. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - */ - // const inputUri = 'abc123' - /** - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - */ - // const inputContent = 'Buffer.from('string')' - /** - * Required. Requested video annotation features. - */ - // const features = 1234 - /** - * Additional video context and/or feature-specific parameters. 
- */ - // const videoContext = {} - /** - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). - */ - // const outputUri = 'abc123' - /** - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - */ - // const locationId = 'abc123' - - // Imports the Videointelligence library - const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1beta2; - - // Instantiates a client - const videointelligenceClient = new VideoIntelligenceServiceClient(); - - async function callAnnotateVideo() { - // Construct request - const request = { - features, - }; - - // Run request - const [operation] = await videointelligenceClient.annotateVideo(request); - const [response] = await operation.promise(); - console.log(response); - } - - callAnnotateVideo(); - // [END videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1beta2/src/index.ts b/owl-bot-staging/v1beta2/src/index.ts deleted file mode 100644 index c6f51173..00000000 --- a/owl-bot-staging/v1beta2/src/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1beta2 from './v1beta2'; -const VideoIntelligenceServiceClient = v1beta2.VideoIntelligenceServiceClient; -type VideoIntelligenceServiceClient = v1beta2.VideoIntelligenceServiceClient; -export {v1beta2, VideoIntelligenceServiceClient}; -export default {v1beta2, VideoIntelligenceServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json b/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json deleted file mode 100644 index a1dcd92a..00000000 --- a/owl-bot-staging/v1beta2/src/v1beta2/gapic_metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.videointelligence.v1beta2", - "libraryPackage": "@google-cloud/video-intelligence", - "services": { - "VideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/index.ts 
b/owl-bot-staging/v1beta2/src/v1beta2/index.ts deleted file mode 100644 index 6fcd1933..00000000 --- a/owl-bot-staging/v1beta2/src/v1beta2/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts deleted file mode 100644 index b7792576..00000000 --- a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client.ts +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1beta2/video_intelligence_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './video_intelligence_service_client_config.json'; -import { operationsProtos } from 'google-gax'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Video Intelligence API. - * @class - * @memberof v1beta2 - */ -export class VideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of VideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. 
- * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1beta2/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1beta2/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1beta2/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1beta2/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}:cancel',}], - }]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const annotateVideoResponse = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse') as gax.protobuf.Type; - const annotateVideoMetadata = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1beta2.AnnotateVideoProgress') as gax.protobuf.Type; - - this.descriptors.longrunning = { - annotateVideo: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - annotateVideoResponse.decode.bind(annotateVideoResponse), - annotateVideoMetadata.decode.bind(annotateVideoMetadata)) - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1beta2.VideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.videoIntelligenceServiceStub) { - return this.videoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1beta2.VideoIntelligenceService. - this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1beta2.VideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1beta2.VideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const videoIntelligenceServiceStubMethods = - ['annotateVideo']; - for (const methodName of videoIntelligenceServiceStubMethods) { - const callPromise = this.videoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.videoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs asynchronous video annotation. Progress and results can be - * retrieved through the `google.longrunning.Operations` interface. - * `Operation.metadata` contains `AnnotateVideoProgress` (progress). - * `Operation.response` contains `AnnotateVideoResponse` (results). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.inputUri - * Input video location. Currently, only - * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI - * may include wildcards in `object-id`, and thus identify multiple videos. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - * @param {Buffer} request.inputContent - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - * @param {number[]} request.features - * Required. Requested video annotation features. - * @param {google.cloud.videointelligence.v1beta2.VideoContext} request.videoContext - * Additional video context and/or feature-specific parameters. - * @param {string} [request.outputUri] - * Optional. Location where the output (in JSON format) should be stored. 
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). - * @param {string} [request.locationId] - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1beta2/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async - */ - annotateVideo( - request?: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - annotateVideo( - request: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request?: protos.google.cloud.videointelligence.v1beta2.IAnnotateVideoRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.annotateVideo(request, options, callback); - } -/** - * Check the status of the long running operation returned by `annotateVideo()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1beta2/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1beta2_generated_VideoIntelligenceService_AnnotateVideo_async - */ - async checkAnnotateVideoProgress(name: string): Promise>{ - const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.videoIntelligenceServiceStub && !this._terminated) { - return this.videoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json deleted file mode 100644 index f1fd51a8..00000000 --- a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_client_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1beta2.VideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - }, - "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.5, - "max_retry_delay_millis": 120000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "AnnotateVideo": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" - } - } - } - } -} diff --git a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json b/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json deleted file mode 100644 index 02f0a919..00000000 --- a/owl-bot-staging/v1beta2/src/v1beta2/video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - 
"../../protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto" -] diff --git a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js deleted file mode 100644 index 85a71c33..00000000 --- a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const videointelligence = require('@google-cloud/video-intelligence'); - -function main() { - const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); -} - -main(); diff --git a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index d466c7b0..00000000 --- a/owl-bot-staging/v1beta2/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; - -// check that the client class type name can be used -function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); - doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); -} - -main(); diff --git a/owl-bot-staging/v1beta2/system-test/install.ts b/owl-bot-staging/v1beta2/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1beta2/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts b/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts deleted file mode 100644 index aacaf8dc..00000000 --- a/owl-bot-staging/v1beta2/test/gapic_video_intelligence_service_v1beta2.ts +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as videointelligenceserviceModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1beta2.VideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.videoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.videoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - 
credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('annotateVideo', () => { - it('invokes annotateVideo without error', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = 
stubLongRunningCall(expectedResponse); - const [operation] = await client.annotateVideo(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo without error using callback', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.annotateVideo( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes annotateVideo with call error', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - 
client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.annotateVideo(request), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo with LRO error', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.annotateVideo(request); - await assert.rejects(operation.promise(), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes checkAnnotateVideoProgress without error', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as 
SinonStub).getCall(0)); - }); - - it('invokes checkAnnotateVideoProgress with error', async () => { - const client = new videointelligenceserviceModule.v1beta2.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); -}); diff --git a/owl-bot-staging/v1beta2/tsconfig.json b/owl-bot-staging/v1beta2/tsconfig.json deleted file mode 100644 index c78f1c88..00000000 --- a/owl-bot-staging/v1beta2/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/v1beta2/webpack.config.js b/owl-bot-staging/v1beta2/webpack.config.js deleted file mode 100644 index 9657601b..00000000 --- a/owl-bot-staging/v1beta2/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'videointelligence', - filename: './videointelligence.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/owl-bot-staging/v1p1beta1/.eslintignore b/owl-bot-staging/v1p1beta1/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1p1beta1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1p1beta1/.eslintrc.json b/owl-bot-staging/v1p1beta1/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1p1beta1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1p1beta1/.gitignore b/owl-bot-staging/v1p1beta1/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1p1beta1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1p1beta1/.jsdoc.js b/owl-bot-staging/v1p1beta1/.jsdoc.js deleted file mode 100644 index 6c816e68..00000000 --- 
a/owl-bot-staging/v1p1beta1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/video-intelligence', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1p1beta1/.mocharc.js b/owl-bot-staging/v1p1beta1/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1p1beta1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1p1beta1/.prettierrc.js b/owl-bot-staging/v1p1beta1/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1p1beta1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1p1beta1/README.md b/owl-bot-staging/v1p1beta1/README.md deleted file mode 100644 index d1c53e8c..00000000 --- a/owl-bot-staging/v1p1beta1/README.md +++ /dev/null @@ -1 +0,0 @@ -Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p1beta1/linkinator.config.json b/owl-bot-staging/v1p1beta1/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1p1beta1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1p1beta1/package.json b/owl-bot-staging/v1p1beta1/package.json deleted file mode 100644 index 6b17fa2c..00000000 --- a/owl-bot-staging/v1p1beta1/package.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "@google-cloud/video-intelligence", - "version": "0.1.0", - "description": "Videointelligence client for Node.js", - "repository": "googleapis/nodejs-videointelligence", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google videointelligence", - "videointelligence", - "video intelligence service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.1.1" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto b/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto deleted file mode 100644 index 3c0b8b56..00000000 --- a/owl-bot-staging/v1p1beta1/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.cloud.videointelligence.v1p1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence"; -option java_multiple_files = true; -option java_outer_classname = "VideoIntelligenceServiceProto"; -option java_package = "com.google.cloud.videointelligence.v1p1beta1"; -option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p1beta1"; -option ruby_package = "Google::Cloud::VideoIntelligence::V1p1beta1"; - -// Service that implements Google Cloud Video Intelligence API. -service VideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Performs asynchronous video annotation. Progress and results can be - // retrieved through the `google.longrunning.Operations` interface. - // `Operation.metadata` contains `AnnotateVideoProgress` (progress). - // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1p1beta1/videos:annotate" - body: "*" - }; - option (google.api.method_signature) = "input_uri,features"; - option (google.longrunning.operation_info) = { - response_type: "AnnotateVideoResponse" - metadata_type: "AnnotateVideoProgress" - }; - } -} - -// Video annotation request. -message AnnotateVideoRequest { - // Input video location. 
Currently, only - // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI - // may include wildcards in `object-id`, and thus identify multiple videos. - // Supported wildcards: '*' to match 0 or more characters; - // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` should be unset. - string input_uri = 1; - - // The video data bytes. - // If unset, the input video(s) should be specified via `input_uri`. - // If set, `input_uri` should be unset. - bytes input_content = 6; - - // Required. Requested video annotation features. - repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional video context and/or feature-specific parameters. - VideoContext video_context = 3; - - // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). - string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Cloud region where annotation should take place. Supported cloud - // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - // is specified, a region will be determined based on video file location. 
- string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Video context and/or feature-specific parameters. -message VideoContext { - // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video is - // treated as a single segment. - repeated VideoSegment segments = 1; - - // Config for LABEL_DETECTION. - LabelDetectionConfig label_detection_config = 2; - - // Config for SHOT_CHANGE_DETECTION. - ShotChangeDetectionConfig shot_change_detection_config = 3; - - // Config for EXPLICIT_CONTENT_DETECTION. - ExplicitContentDetectionConfig explicit_content_detection_config = 4; - - // Config for SPEECH_TRANSCRIPTION. - SpeechTranscriptionConfig speech_transcription_config = 6; -} - -// Config for LABEL_DETECTION. -message LabelDetectionConfig { - // What labels should be detected with LABEL_DETECTION, in addition to - // video-level labels or segment-level labels. - // If unspecified, defaults to `SHOT_MODE`. - LabelDetectionMode label_detection_mode = 1; - - // Whether the video has been shot from a stationary (i.e. non-moving) camera. - // When set to true, might improve detection accuracy for moving objects. - // Should be used with `SHOT_AND_FRAME_MODE` enabled. - bool stationary_camera = 2; - - // Model to use for label detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 3; -} - -// Config for SHOT_CHANGE_DETECTION. -message ShotChangeDetectionConfig { - // Model to use for shot change detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for EXPLICIT_CONTENT_DETECTION. -message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Video segment. 
-message VideoSegment { - // Time-offset, relative to the beginning of the video, - // corresponding to the start of the segment (inclusive). - google.protobuf.Duration start_time_offset = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the end of the segment (inclusive). - google.protobuf.Duration end_time_offset = 2; -} - -// Video segment level annotation results for label detection. -message LabelSegment { - // Video segment where a label was detected. - VideoSegment segment = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Video frame level annotation results for label detection. -message LabelFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Detected entity from video analysis. -message Entity { - // Opaque entity ID. Some IDs may be available in - // [Google Knowledge Graph Search - // API](https://developers.google.com/knowledge-graph/). - string entity_id = 1; - - // Textual description, e.g. `Fixed-gear bicycle`. - string description = 2; - - // Language code for `description` in BCP-47 format. - string language_code = 3; -} - -// Label annotation. -message LabelAnnotation { - // Detected entity. - Entity entity = 1; - - // Common categories for the detected entity. - // E.g. when the label is `Terrier` the category is likely `dog`. And in some - // cases there might be more than one categories e.g. `Terrier` could also be - // a `pet`. - repeated Entity category_entities = 2; - - // All video segments where a label was detected. - repeated LabelSegment segments = 3; - - // All video frames where a label was detected. - repeated LabelFrame frames = 4; -} - -// Video frame level annotation results for explicit content. 
-message ExplicitContentFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Likelihood of the pornography content.. - Likelihood pornography_likelihood = 2; -} - -// Explicit content annotation (based on per-frame visual signals only). -// If no explicit content has been detected in a frame, no annotations are -// present for that frame. -message ExplicitContentAnnotation { - // All video frames where explicit content was detected. - repeated ExplicitContentFrame frames = 1; -} - -// Annotation results for a single video. -message VideoAnnotationResults { - // Output only. Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Label annotations on video level or user specified segment level. - // There is exactly one element for each unique label. - repeated LabelAnnotation segment_label_annotations = 2; - - // Label annotations on shot level. - // There is exactly one element for each unique label. - repeated LabelAnnotation shot_label_annotations = 3; - - // Label annotations on frame level. - // There is exactly one element for each unique label. - repeated LabelAnnotation frame_label_annotations = 4; - - // Shot annotations. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 6; - - // Explicit content annotation. - ExplicitContentAnnotation explicit_annotation = 7; - - // Speech transcription. - repeated SpeechTranscription speech_transcriptions = 11; - - // Output only. If set, indicates an error. Note that for a single - // `AnnotateVideoRequest` some videos may succeed and some may fail. - google.rpc.Status error = 9; -} - -// Video annotation response. Included in the `response` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. 
-message AnnotateVideoResponse { - // Annotation results for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationResults annotation_results = 1; -} - -// Annotation progress for a single video. -message VideoAnnotationProgress { - // Output only. Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Output only. Approximate percentage processed thus far. Guaranteed to be - // 100 when fully processed. - int32 progress_percent = 2; - - // Output only. Time when the request was received. - google.protobuf.Timestamp start_time = 3; - - // Output only. Time of the most recent update. - google.protobuf.Timestamp update_time = 4; -} - -// Video annotation progress. Included in the `metadata` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoProgress { - // Progress metadata for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationProgress annotation_progress = 1; -} - -// Config for SPEECH_TRANSCRIPTION. -message SpeechTranscriptionConfig { - // Required. *Required* The language of the supplied audio as a - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - // Example: "en-US". - // See [Language Support](https://cloud.google.com/speech/docs/languages) - // for a list of the currently supported language codes. - string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Maximum number of recognition hypotheses to be returned. - // Specifically, the maximum number of `SpeechRecognitionAlternative` messages - // within each `SpeechTranscription`. The server may return fewer than - // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will - // return a maximum of one. If omitted, will return a maximum of one. - int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. 
If set to `true`, the server will attempt to filter out - // profanities, replacing all but the initial character in each filtered word - // with asterisks, e.g. "f***". If set to `false` or omitted, profanities - // won't be filtered out. - bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If 'true', adds punctuation to recognition result hypotheses. - // This feature is only available in select languages. Setting this for - // requests in other languages has no effect at all. The default 'false' value - // does not add punctuation to result hypotheses. NOTE: "This is currently - // offered as an experimental service, complimentary to all users. In the - // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. For file formats, such as MXF or MKV, supporting multiple audio - // tracks, specify up to two tracks. Default: track 0. - repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; -} - -// Provides "hints" to the speech recognizer to favor specific words and phrases -// in the results. -message SpeechContext { - // Optional. A list of strings containing words and phrases "hints" so that - // the speech recognition is more likely to recognize them. This can be used - // to improve the accuracy for specific words and phrases, for example, if - // specific commands are typically spoken by the user. This can also be used - // to add additional words to the vocabulary of the recognizer. See - // [usage limits](https://cloud.google.com/speech/limits#content). - repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// A speech recognition result corresponding to a portion of the audio. 
-message SpeechTranscription { - // May contain one or more recognition hypotheses (up to the maximum specified - // in `max_alternatives`). These alternatives are ordered in terms of - // accuracy, with the top (first) alternative being the most probable, as - // ranked by the recognizer. - repeated SpeechRecognitionAlternative alternatives = 1; -} - -// Alternative hypotheses (a.k.a. n-best list). -message SpeechRecognitionAlternative { - // Output only. Transcript text representing the words that the user spoke. - string transcript = 1; - - // Output only. The confidence estimate between 0.0 and 1.0. A higher number - // indicates an estimated greater likelihood that the recognized words are - // correct. This field is set only for the top alternative. - // This field is not guaranteed to be accurate and users should not rely on it - // to be always provided. - // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A list of word-specific information for each recognized word. - repeated WordInfo words = 3; -} - -// Word-specific information for recognized words. Word information is only -// included in the response when certain request parameters are set, such -// as `enable_word_time_offsets`. -message WordInfo { - // Output only. Time offset relative to the beginning of the audio, and - // corresponding to the start of the spoken word. This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. - google.protobuf.Duration start_time = 1; - - // Output only. Time offset relative to the beginning of the audio, and - // corresponding to the end of the spoken word. This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. 
- google.protobuf.Duration end_time = 2; - - // Output only. The word corresponding to this set of information. - string word = 3; -} - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Speech transcription. - SPEECH_TRANSCRIPTION = 6; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. - FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. 
- VERY_LIKELY = 5; -} diff --git a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json deleted file mode 100644 index 62b702ba..00000000 --- a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/snippet_metadata.google.cloud.videointelligence.v1p1beta1.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-videointelligence", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.videointelligence.v1p1beta1", - "version": "v1p1beta1" - } - ] - }, - "snippets": [ - { - "regionTag": "videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async", - "title": "videointelligence annotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", - "canonical": true, - "file": "video_intelligence_service.annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 91, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService.AnnotateVideo", - "async": true, - "parameters": [ - { - "name": "input_uri", - "type": "TYPE_STRING" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - }, - { - "name": "features", - "type": "TYPE_ENUM[]" - }, - { - "name": "video_context", - "type": ".google.cloud.videointelligence.v1p1beta1.VideoContext" - }, - { - "name": "output_uri", - "type": "TYPE_STRING" - }, - { - "name": "location_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "VideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient" - }, - "method": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService.AnnotateVideo", - "service": { - "shortName": "VideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js deleted file mode 100644 index ccf9f33c..00000000 --- a/owl-bot-staging/v1p1beta1/samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(features) { - // [START videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Input video location. Currently, only - * Google Cloud Storage (https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). A video URI - * may include wildcards in `object-id`, and thus identify multiple videos. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - */ - // const inputUri = 'abc123' - /** - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - */ - // const inputContent = 'Buffer.from('string')' - /** - * Required. Requested video annotation features. - */ - // const features = 1234 - /** - * Additional video context and/or feature-specific parameters. 
- */ - // const videoContext = {} - /** - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). - */ - // const outputUri = 'abc123' - /** - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - */ - // const locationId = 'abc123' - - // Imports the Videointelligence library - const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p1beta1; - - // Instantiates a client - const videointelligenceClient = new VideoIntelligenceServiceClient(); - - async function callAnnotateVideo() { - // Construct request - const request = { - features, - }; - - // Run request - const [operation] = await videointelligenceClient.annotateVideo(request); - const [response] = await operation.promise(); - console.log(response); - } - - callAnnotateVideo(); - // [END videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p1beta1/src/index.ts b/owl-bot-staging/v1p1beta1/src/index.ts deleted file mode 100644 index 25d2cfaa..00000000 --- a/owl-bot-staging/v1p1beta1/src/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1p1beta1 from './v1p1beta1'; -const VideoIntelligenceServiceClient = v1p1beta1.VideoIntelligenceServiceClient; -type VideoIntelligenceServiceClient = v1p1beta1.VideoIntelligenceServiceClient; -export {v1p1beta1, VideoIntelligenceServiceClient}; -export default {v1p1beta1, VideoIntelligenceServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json deleted file mode 100644 index 624b1379..00000000 --- a/owl-bot-staging/v1p1beta1/src/v1p1beta1/gapic_metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.videointelligence.v1p1beta1", - "libraryPackage": "@google-cloud/video-intelligence", - "services": { - "VideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - } - } - } - } -} diff --git 
a/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts b/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts deleted file mode 100644 index 6fcd1933..00000000 --- a/owl-bot-staging/v1p1beta1/src/v1p1beta1/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts deleted file mode 100644 index faf95026..00000000 --- a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client.ts +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1p1beta1/video_intelligence_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './video_intelligence_service_client_config.json'; -import { operationsProtos } from 'google-gax'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Video Intelligence API. - * @class - * @memberof v1p1beta1 - */ -export class VideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of VideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. 
- * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p1beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p1beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p1beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p1beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], - }]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const annotateVideoResponse = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse') as gax.protobuf.Type; - const annotateVideoMetadata = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress') as gax.protobuf.Type; - - this.descriptors.longrunning = { - annotateVideo: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - annotateVideoResponse.decode.bind(annotateVideoResponse), - annotateVideoMetadata.decode.bind(annotateVideoMetadata)) - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.videoIntelligenceServiceStub) { - return this.videoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService. - this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const videoIntelligenceServiceStubMethods = - ['annotateVideo']; - for (const methodName of videoIntelligenceServiceStubMethods) { - const callPromise = this.videoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.videoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs asynchronous video annotation. Progress and results can be - * retrieved through the `google.longrunning.Operations` interface. - * `Operation.metadata` contains `AnnotateVideoProgress` (progress). - * `Operation.response` contains `AnnotateVideoResponse` (results). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.inputUri - * Input video location. Currently, only - * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI - * may include wildcards in `object-id`, and thus identify multiple videos. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - * @param {Buffer} request.inputContent - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - * @param {number[]} request.features - * Required. Requested video annotation features. - * @param {google.cloud.videointelligence.v1p1beta1.VideoContext} request.videoContext - * Additional video context and/or feature-specific parameters. - * @param {string} [request.outputUri] - * Optional. Location where the output (in JSON format) should be stored. 
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). - * @param {string} [request.locationId] - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p1beta1.IAnnotateVideoRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.annotateVideo(request, options, callback); - } -/** - * Check the status of the long running operation returned by `annotateVideo()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1p1beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p1beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - async checkAnnotateVideoProgress(name: string): Promise>{ - const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.videoIntelligenceServiceStub && !this._terminated) { - return this.videoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json deleted file mode 100644 index 5d0c24db..00000000 --- a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_client_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - }, - "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.5, - "max_retry_delay_millis": 120000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "AnnotateVideo": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" - } - } - } - } -} diff --git a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json deleted file mode 100644 index 4213216a..00000000 --- a/owl-bot-staging/v1p1beta1/src/v1p1beta1/video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - 
"../../protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto" -] diff --git a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index 85a71c33..00000000 --- a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const videointelligence = require('@google-cloud/video-intelligence'); - -function main() { - const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); -} - -main(); diff --git a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index d466c7b0..00000000 --- a/owl-bot-staging/v1p1beta1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; - -// check that the client class type name can be used -function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); - doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); -} - -main(); diff --git a/owl-bot-staging/v1p1beta1/system-test/install.ts b/owl-bot-staging/v1p1beta1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1p1beta1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts b/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts deleted file mode 100644 index e54dd58d..00000000 --- a/owl-bot-staging/v1p1beta1/test/gapic_video_intelligence_service_v1p1beta1.ts +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as videointelligenceserviceModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1p1beta1.VideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.videoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.videoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new 
videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('annotateVideo', () => { - it('invokes annotateVideo without error', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); - const [operation] = await client.annotateVideo(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo without error using callback', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.annotateVideo( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes annotateVideo with call error', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: 
{headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.annotateVideo(request), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo with LRO error', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.annotateVideo(request); - await assert.rejects(operation.promise(), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes checkAnnotateVideoProgress without error', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - 
assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkAnnotateVideoProgress with error', async () => { - const client = new videointelligenceserviceModule.v1p1beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); -}); diff --git a/owl-bot-staging/v1p1beta1/tsconfig.json b/owl-bot-staging/v1p1beta1/tsconfig.json deleted file mode 100644 index c78f1c88..00000000 --- a/owl-bot-staging/v1p1beta1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/v1p1beta1/webpack.config.js b/owl-bot-staging/v1p1beta1/webpack.config.js deleted file mode 100644 index 9657601b..00000000 --- a/owl-bot-staging/v1p1beta1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'videointelligence', - filename: './videointelligence.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/owl-bot-staging/v1p2beta1/.eslintignore b/owl-bot-staging/v1p2beta1/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1p2beta1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1p2beta1/.eslintrc.json b/owl-bot-staging/v1p2beta1/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1p2beta1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1p2beta1/.gitignore b/owl-bot-staging/v1p2beta1/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1p2beta1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1p2beta1/.jsdoc.js 
b/owl-bot-staging/v1p2beta1/.jsdoc.js deleted file mode 100644 index 6c816e68..00000000 --- a/owl-bot-staging/v1p2beta1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/video-intelligence', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1p2beta1/.mocharc.js b/owl-bot-staging/v1p2beta1/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1p2beta1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1p2beta1/.prettierrc.js b/owl-bot-staging/v1p2beta1/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1p2beta1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1p2beta1/README.md b/owl-bot-staging/v1p2beta1/README.md deleted file mode 100644 index d1c53e8c..00000000 --- a/owl-bot-staging/v1p2beta1/README.md +++ /dev/null @@ -1 +0,0 @@ -Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p2beta1/linkinator.config.json b/owl-bot-staging/v1p2beta1/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1p2beta1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1p2beta1/package.json b/owl-bot-staging/v1p2beta1/package.json deleted file mode 100644 index 6b17fa2c..00000000 --- a/owl-bot-staging/v1p2beta1/package.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "name": "@google-cloud/video-intelligence", - "version": "0.1.0", - "description": "Videointelligence client for Node.js", - "repository": "googleapis/nodejs-videointelligence", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google videointelligence", - "videointelligence", - "video intelligence service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.1.1" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto b/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto deleted file mode 100644 index c185c0aa..00000000 --- a/owl-bot-staging/v1p2beta1/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.cloud.videointelligence.v1p2beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P2Beta1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence"; -option java_multiple_files = true; -option java_outer_classname = "VideoIntelligenceServiceProto"; -option java_package = "com.google.cloud.videointelligence.v1p2beta1"; -option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p2beta1"; -option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1"; - -// Service that implements Google Cloud Video Intelligence API. -service VideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; - - // Performs asynchronous video annotation. Progress and results can be - // retrieved through the `google.longrunning.Operations` interface. - // `Operation.metadata` contains `AnnotateVideoProgress` (progress). - // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1p2beta1/videos:annotate" - body: "*" - }; - option (google.api.method_signature) = "input_uri,features"; - option (google.longrunning.operation_info) = { - response_type: "AnnotateVideoResponse" - metadata_type: "AnnotateVideoProgress" - }; - } -} - -// Video annotation request. -message AnnotateVideoRequest { - // Input video location. 
Currently, only - // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see - // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). - // A video URI may include wildcards in `object-id`, and thus identify - // multiple videos. Supported wildcards: '*' to match 0 or more characters; - // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` should be unset. - string input_uri = 1; - - // The video data bytes. - // If unset, the input video(s) should be specified via `input_uri`. - // If set, `input_uri` should be unset. - bytes input_content = 6; - - // Required. Requested video annotation features. - repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional video context and/or feature-specific parameters. - VideoContext video_context = 3; - - // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported, which must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see - // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). - string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Cloud region where annotation should take place. Supported cloud - // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - // is specified, a region will be determined based on video file location. 
- string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Video context and/or feature-specific parameters. -message VideoContext { - // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video is - // treated as a single segment. - repeated VideoSegment segments = 1; - - // Config for LABEL_DETECTION. - LabelDetectionConfig label_detection_config = 2; - - // Config for SHOT_CHANGE_DETECTION. - ShotChangeDetectionConfig shot_change_detection_config = 3; - - // Config for EXPLICIT_CONTENT_DETECTION. - ExplicitContentDetectionConfig explicit_content_detection_config = 4; - - // Config for TEXT_DETECTION. - TextDetectionConfig text_detection_config = 8; -} - -// Config for LABEL_DETECTION. -message LabelDetectionConfig { - // What labels should be detected with LABEL_DETECTION, in addition to - // video-level labels or segment-level labels. - // If unspecified, defaults to `SHOT_MODE`. - LabelDetectionMode label_detection_mode = 1; - - // Whether the video has been shot from a stationary (i.e. non-moving) camera. - // When set to true, might improve detection accuracy for moving objects. - // Should be used with `SHOT_AND_FRAME_MODE` enabled. - bool stationary_camera = 2; - - // Model to use for label detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 3; -} - -// Config for SHOT_CHANGE_DETECTION. -message ShotChangeDetectionConfig { - // Model to use for shot change detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for EXPLICIT_CONTENT_DETECTION. -message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for TEXT_DETECTION. 
-message TextDetectionConfig { - // Language hint can be specified if the language to be detected is known a - // priori. It can increase the accuracy of the detection. Language hint must - // be language code in BCP-47 format. - // - // Automatic language detection is performed if no hint is provided. - repeated string language_hints = 1; -} - -// Video segment. -message VideoSegment { - // Time-offset, relative to the beginning of the video, - // corresponding to the start of the segment (inclusive). - google.protobuf.Duration start_time_offset = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the end of the segment (inclusive). - google.protobuf.Duration end_time_offset = 2; -} - -// Video segment level annotation results for label detection. -message LabelSegment { - // Video segment where a label was detected. - VideoSegment segment = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Video frame level annotation results for label detection. -message LabelFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Detected entity from video analysis. -message Entity { - // Opaque entity ID. Some IDs may be available in - // [Google Knowledge Graph Search - // API](https://developers.google.com/knowledge-graph/). - string entity_id = 1; - - // Textual description, e.g. `Fixed-gear bicycle`. - string description = 2; - - // Language code for `description` in BCP-47 format. - string language_code = 3; -} - -// Label annotation. -message LabelAnnotation { - // Detected entity. - Entity entity = 1; - - // Common categories for the detected entity. - // E.g. when the label is `Terrier` the category is likely `dog`. And in some - // cases there might be more than one categories e.g. 
`Terrier` could also be - // a `pet`. - repeated Entity category_entities = 2; - - // All video segments where a label was detected. - repeated LabelSegment segments = 3; - - // All video frames where a label was detected. - repeated LabelFrame frames = 4; -} - -// Video frame level annotation results for explicit content. -message ExplicitContentFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Likelihood of the pornography content.. - Likelihood pornography_likelihood = 2; -} - -// Explicit content annotation (based on per-frame visual signals only). -// If no explicit content has been detected in a frame, no annotations are -// present for that frame. -message ExplicitContentAnnotation { - // All video frames where explicit content was detected. - repeated ExplicitContentFrame frames = 1; -} - -// Normalized bounding box. -// The normalized vertex coordinates are relative to the original image. -// Range: [0, 1]. -message NormalizedBoundingBox { - // Left X coordinate. - float left = 1; - - // Top Y coordinate. - float top = 2; - - // Right X coordinate. - float right = 3; - - // Bottom Y coordinate. - float bottom = 4; -} - -// Annotation results for a single video. -message VideoAnnotationResults { - // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Label annotations on video level or user specified segment level. - // There is exactly one element for each unique label. - repeated LabelAnnotation segment_label_annotations = 2; - - // Label annotations on shot level. - // There is exactly one element for each unique label. - repeated LabelAnnotation shot_label_annotations = 3; - - // Label annotations on frame level. - // There is exactly one element for each unique label. - repeated LabelAnnotation frame_label_annotations = 4; - - // Shot annotations. 
Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 6; - - // Explicit content annotation. - ExplicitContentAnnotation explicit_annotation = 7; - - // OCR text detection and tracking. - // Annotations for list of detected text snippets. Each will have list of - // frame information associated with it. - repeated TextAnnotation text_annotations = 12; - - // Annotations for list of objects detected and tracked in video. - repeated ObjectTrackingAnnotation object_annotations = 14; - - // If set, indicates an error. Note that for a single `AnnotateVideoRequest` - // some videos may succeed and some may fail. - google.rpc.Status error = 9; -} - -// Video annotation response. Included in the `response` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoResponse { - // Annotation results for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationResults annotation_results = 1; -} - -// Annotation progress for a single video. -message VideoAnnotationProgress { - // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Approximate percentage processed thus far. Guaranteed to be - // 100 when fully processed. - int32 progress_percent = 2; - - // Time when the request was received. - google.protobuf.Timestamp start_time = 3; - - // Time of the most recent update. - google.protobuf.Timestamp update_time = 4; -} - -// Video annotation progress. Included in the `metadata` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoProgress { - // Progress metadata for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationProgress annotation_progress = 1; -} - -// A vertex represents a 2D point in the image. 
-// NOTE: the normalized vertex coordinates are relative to the original image -// and range from 0 to 1. -message NormalizedVertex { - // X coordinate. - float x = 1; - - // Y coordinate. - float y = 2; -} - -// Normalized bounding polygon for text (that might not be aligned with axis). -// Contains list of the corner points in clockwise order starting from -// top-left corner. For example, for a rectangular bounding box: -// When the text is horizontal it might look like: -// 0----1 -// | | -// 3----2 -// -// When it's clockwise rotated 180 degrees around the top-left corner it -// becomes: -// 2----3 -// | | -// 1----0 -// -// and the vertex order will still be (0, 1, 2, 3). Note that values can be less -// than 0, or greater than 1 due to trignometric calculations for location of -// the box. -message NormalizedBoundingPoly { - // Normalized vertices of the bounding polygon. - repeated NormalizedVertex vertices = 1; -} - -// Video segment level annotation results for text detection. -message TextSegment { - // Video segment where a text snippet was detected. - VideoSegment segment = 1; - - // Confidence for the track of detected text. It is calculated as the highest - // over all frames where OCR detected text appears. - float confidence = 2; - - // Information related to the frames where OCR detected text appears. - repeated TextFrame frames = 3; -} - -// Video frame level annotation results for text annotation (OCR). -// Contains information regarding timestamp and bounding box locations for the -// frames containing detected OCR text snippets. -message TextFrame { - // Bounding polygon of the detected text for this frame. - NormalizedBoundingPoly rotated_bounding_box = 1; - - // Timestamp of this frame. - google.protobuf.Duration time_offset = 2; -} - -// Annotations related to one detected OCR text snippet. This will contain the -// corresponding text, confidence value, and frame level information for each -// detection. 
-message TextAnnotation { - // The detected text. - string text = 1; - - // All video segments where OCR detected text appears. - repeated TextSegment segments = 2; -} - -// Video frame level annotations for object detection and tracking. This field -// stores per frame location, time offset, and confidence. -message ObjectTrackingFrame { - // The normalized bounding box location of this object track for the frame. - NormalizedBoundingBox normalized_bounding_box = 1; - - // The timestamp of the frame in microseconds. - google.protobuf.Duration time_offset = 2; -} - -// Annotations corresponding to one tracked object. -message ObjectTrackingAnnotation { - // Different representation of tracking info in non-streaming batch - // and streaming modes. - oneof track_info { - // Non-streaming batch mode ONLY. - // Each object track corresponds to one video segment where it appears. - VideoSegment segment = 3; - - // Streaming mode ONLY. - // In streaming mode, we do not know the end time of a tracked object - // before it is completed. Hence, there is no VideoSegment info returned. - // Instead, we provide a unique identifiable integer track_id so that - // the customers can correlate the results of the ongoing - // ObjectTrackAnnotation of the same track_id over time. - int64 track_id = 5; - } - - // Entity to specify the object category that this track is labeled as. - Entity entity = 1; - - // Object category's labeling confidence of this track. - float confidence = 4; - - // Information corresponding to all frames where this object track appears. - repeated ObjectTrackingFrame frames = 2; -} - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // OCR text detection and tracking. 
- TEXT_DETECTION = 7; - - // Object detection and tracking. - OBJECT_TRACKING = 9; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. - FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. - VERY_LIKELY = 5; -} diff --git a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json deleted file mode 100644 index 253c9b0b..00000000 --- a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/snippet_metadata.google.cloud.videointelligence.v1p2beta1.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-videointelligence", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.videointelligence.v1p2beta1", - "version": "v1p2beta1" - } - ] - }, - "snippets": [ - { - "regionTag": "videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async", - "title": "videointelligence annotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", - "canonical": true, - "file": "video_intelligence_service.annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 89, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService.AnnotateVideo", - "async": true, - "parameters": [ - { - "name": "input_uri", - "type": "TYPE_STRING" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - }, - { - "name": "features", - "type": "TYPE_ENUM[]" - }, - { - "name": "video_context", - "type": ".google.cloud.videointelligence.v1p2beta1.VideoContext" - }, - { - "name": "output_uri", - "type": "TYPE_STRING" - }, - { - "name": "location_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "VideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient" - }, - "method": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService.AnnotateVideo", - "service": { - "shortName": "VideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js deleted file mode 100644 index f4e003e8..00000000 --- a/owl-bot-staging/v1p2beta1/samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(features) { - // [START videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Input video location. Currently, only - * Google Cloud Storage (https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For more information, see - * Request URIs (https://cloud.google.com/storage/docs/request-endpoints). - * A video URI may include wildcards in `object-id`, and thus identify - * multiple videos. Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - */ - // const inputUri = 'abc123' - /** - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - */ - // const inputContent = 'Buffer.from('string')' - /** - * Required. Requested video annotation features. - */ - // const features = 1234 - /** - * Additional video context and/or feature-specific parameters. 
- */ - // const videoContext = {} - /** - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only Google Cloud Storage (https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For more information, see - * Request URIs (https://cloud.google.com/storage/docs/request-endpoints). - */ - // const outputUri = 'abc123' - /** - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - */ - // const locationId = 'abc123' - - // Imports the Videointelligence library - const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p2beta1; - - // Instantiates a client - const videointelligenceClient = new VideoIntelligenceServiceClient(); - - async function callAnnotateVideo() { - // Construct request - const request = { - features, - }; - - // Run request - const [operation] = await videointelligenceClient.annotateVideo(request); - const [response] = await operation.promise(); - console.log(response); - } - - callAnnotateVideo(); - // [END videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p2beta1/src/index.ts b/owl-bot-staging/v1p2beta1/src/index.ts deleted file mode 100644 index 33d5acc3..00000000 --- a/owl-bot-staging/v1p2beta1/src/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1p2beta1 from './v1p2beta1'; -const VideoIntelligenceServiceClient = v1p2beta1.VideoIntelligenceServiceClient; -type VideoIntelligenceServiceClient = v1p2beta1.VideoIntelligenceServiceClient; -export {v1p2beta1, VideoIntelligenceServiceClient}; -export default {v1p2beta1, VideoIntelligenceServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json deleted file mode 100644 index 701b895c..00000000 --- a/owl-bot-staging/v1p2beta1/src/v1p2beta1/gapic_metadata.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.videointelligence.v1p2beta1", - "libraryPackage": "@google-cloud/video-intelligence", - "services": { - "VideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - } - } - } - } -} diff --git 
a/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts b/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts deleted file mode 100644 index 6fcd1933..00000000 --- a/owl-bot-staging/v1p2beta1/src/v1p2beta1/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts deleted file mode 100644 index 6376067c..00000000 --- a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client.ts +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1p2beta1/video_intelligence_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './video_intelligence_service_client_config.json'; -import { operationsProtos } from 'google-gax'; -const version = require('../../../package.json').version; - -/** - * Service that implements Google Cloud Video Intelligence API. - * @class - * @memberof v1p2beta1 - */ -export class VideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of VideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. 
- * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. 
- const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. 
- const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. - const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p2beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p2beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p2beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p2beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], - }]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const annotateVideoResponse = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse') as gax.protobuf.Type; - const annotateVideoMetadata = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress') as gax.protobuf.Type; - - this.descriptors.longrunning = { - annotateVideo: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - annotateVideoResponse.decode.bind(annotateVideoResponse), - annotateVideoMetadata.decode.bind(annotateVideoMetadata)) - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.videoIntelligenceServiceStub) { - return this.videoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService. - this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const videoIntelligenceServiceStubMethods = - ['annotateVideo']; - for (const methodName of videoIntelligenceServiceStubMethods) { - const callPromise = this.videoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.videoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs asynchronous video annotation. Progress and results can be - * retrieved through the `google.longrunning.Operations` interface. - * `Operation.metadata` contains `AnnotateVideoProgress` (progress). - * `Operation.response` contains `AnnotateVideoResponse` (results). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.inputUri - * Input video location. Currently, only - * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - * supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For more information, see - * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). - * A video URI may include wildcards in `object-id`, and thus identify - * multiple videos. Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` should be unset. - * @param {Buffer} request.inputContent - * The video data bytes. - * If unset, the input video(s) should be specified via `input_uri`. - * If set, `input_uri` should be unset. - * @param {number[]} request.features - * Required. Requested video annotation features. - * @param {google.cloud.videointelligence.v1p2beta1.VideoContext} request.videoContext - * Additional video context and/or feature-specific parameters. - * @param {string} [request.outputUri] - * Optional. Location where the output (in JSON format) should be stored. 
- * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - * URIs are supported, which must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For more information, see - * [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). - * @param {string} [request.locationId] - * Optional. Cloud region where annotation should take place. Supported cloud - * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - * is specified, a region will be determined based on video file location. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p2beta1.IAnnotateVideoRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.annotateVideo(request, options, callback); - } -/** - * Check the status of the long running operation returned by `annotateVideo()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1p2beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p2beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - async checkAnnotateVideoProgress(name: string): Promise>{ - const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.videoIntelligenceServiceStub && !this._terminated) { - return this.videoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json deleted file mode 100644 index 20e27528..00000000 --- a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_client_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - }, - "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.5, - "max_retry_delay_millis": 120000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "AnnotateVideo": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" - } - } - } - } -} diff --git a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json deleted file mode 100644 index 64abb974..00000000 --- a/owl-bot-staging/v1p2beta1/src/v1p2beta1/video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - 
"../../protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto" -] diff --git a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index 85a71c33..00000000 --- a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const videointelligence = require('@google-cloud/video-intelligence'); - -function main() { - const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); -} - -main(); diff --git a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index d466c7b0..00000000 --- a/owl-bot-staging/v1p2beta1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import {VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; - -// check that the client class type name can be used -function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); - doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); -} - -main(); diff --git a/owl-bot-staging/v1p2beta1/system-test/install.ts b/owl-bot-staging/v1p2beta1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1p2beta1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts b/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts deleted file mode 100644 index 6793631d..00000000 --- a/owl-bot-staging/v1p2beta1/test/gapic_video_intelligence_service_v1p2beta1.ts +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as videointelligenceserviceModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1p2beta1.VideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.videoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.videoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new 
videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('annotateVideo', () => { - it('invokes annotateVideo without error', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); - const [operation] = await client.annotateVideo(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo without error using callback', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.annotateVideo( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes annotateVideo with call error', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: 
{headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.annotateVideo(request), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo with LRO error', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.annotateVideo(request); - await assert.rejects(operation.promise(), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes checkAnnotateVideoProgress without error', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - 
assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkAnnotateVideoProgress with error', async () => { - const client = new videointelligenceserviceModule.v1p2beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); -}); diff --git a/owl-bot-staging/v1p2beta1/tsconfig.json b/owl-bot-staging/v1p2beta1/tsconfig.json deleted file mode 100644 index c78f1c88..00000000 --- a/owl-bot-staging/v1p2beta1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/v1p2beta1/webpack.config.js b/owl-bot-staging/v1p2beta1/webpack.config.js deleted file mode 100644 index 9657601b..00000000 --- a/owl-bot-staging/v1p2beta1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'videointelligence', - filename: './videointelligence.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/owl-bot-staging/v1p3beta1/.eslintignore b/owl-bot-staging/v1p3beta1/.eslintignore deleted file mode 100644 index cfc348ec..00000000 --- a/owl-bot-staging/v1p3beta1/.eslintignore +++ /dev/null @@ -1,7 +0,0 @@ -**/node_modules -**/.coverage -build/ -docs/ -protos/ -system-test/ -samples/generated/ diff --git a/owl-bot-staging/v1p3beta1/.eslintrc.json b/owl-bot-staging/v1p3beta1/.eslintrc.json deleted file mode 100644 index 78215349..00000000 --- a/owl-bot-staging/v1p3beta1/.eslintrc.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "./node_modules/gts" -} diff --git a/owl-bot-staging/v1p3beta1/.gitignore b/owl-bot-staging/v1p3beta1/.gitignore deleted file mode 100644 index 5d32b237..00000000 --- a/owl-bot-staging/v1p3beta1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -**/*.log -**/node_modules -.coverage -coverage -.nyc_output -docs/ -out/ -build/ -system-test/secrets.js -system-test/*key.json -*.lock -.DS_Store -package-lock.json -__pycache__ diff --git a/owl-bot-staging/v1p3beta1/.jsdoc.js 
b/owl-bot-staging/v1p3beta1/.jsdoc.js deleted file mode 100644 index 6c816e68..00000000 --- a/owl-bot-staging/v1p3beta1/.jsdoc.js +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -'use strict'; - -module.exports = { - opts: { - readme: './README.md', - package: './package.json', - template: './node_modules/jsdoc-fresh', - recurse: true, - verbose: true, - destination: './docs/' - }, - plugins: [ - 'plugins/markdown', - 'jsdoc-region-tag' - ], - source: { - excludePattern: '(^|\\/|\\\\)[._]', - include: [ - 'build/src', - 'protos' - ], - includePattern: '\\.js$' - }, - templates: { - copyright: 'Copyright 2022 Google LLC', - includeDate: false, - sourceFiles: false, - systemName: '@google-cloud/video-intelligence', - theme: 'lumen', - default: { - outputSourceFiles: false - } - }, - markdown: { - idInHeadings: true - } -}; diff --git a/owl-bot-staging/v1p3beta1/.mocharc.js b/owl-bot-staging/v1p3beta1/.mocharc.js deleted file mode 100644 index 481c522b..00000000 --- a/owl-bot-staging/v1p3beta1/.mocharc.js +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -const config = { - "enable-source-maps": true, - "throw-deprecation": true, - "timeout": 10000 -} -if (process.env.MOCHA_THROW_DEPRECATION === 'false') { - delete config['throw-deprecation']; -} -if (process.env.MOCHA_REPORTER) { - config.reporter = process.env.MOCHA_REPORTER; -} -if (process.env.MOCHA_REPORTER_OUTPUT) { - config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; -} -module.exports = config diff --git a/owl-bot-staging/v1p3beta1/.prettierrc.js b/owl-bot-staging/v1p3beta1/.prettierrc.js deleted file mode 100644 index 494e1478..00000000 --- a/owl-bot-staging/v1p3beta1/.prettierrc.js +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. 
** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - -module.exports = { - ...require('gts/.prettierrc.json') -} diff --git a/owl-bot-staging/v1p3beta1/README.md b/owl-bot-staging/v1p3beta1/README.md deleted file mode 100644 index d1c53e8c..00000000 --- a/owl-bot-staging/v1p3beta1/README.md +++ /dev/null @@ -1 +0,0 @@ -Videointelligence: Nodejs Client diff --git a/owl-bot-staging/v1p3beta1/linkinator.config.json b/owl-bot-staging/v1p3beta1/linkinator.config.json deleted file mode 100644 index befd23c8..00000000 --- a/owl-bot-staging/v1p3beta1/linkinator.config.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "recurse": true, - "skip": [ - "https://codecov.io/gh/googleapis/", - "www.googleapis.com", - "img.shields.io", - "https://console.cloud.google.com/cloudshell", - "https://support.google.com" - ], - "silent": true, - "concurrency": 5, - "retry": true, - "retryErrors": true, - "retryErrorsCount": 5, - "retryErrorsJitter": 3000 -} diff --git a/owl-bot-staging/v1p3beta1/package.json b/owl-bot-staging/v1p3beta1/package.json deleted file mode 100644 index ad2f8dab..00000000 --- a/owl-bot-staging/v1p3beta1/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "@google-cloud/video-intelligence", - "version": "0.1.0", - "description": "Videointelligence client for Node.js", - "repository": "googleapis/nodejs-videointelligence", - "license": "Apache-2.0", - "author": "Google LLC", - "main": "build/src/index.js", - "files": [ - "build/src", - "build/protos" - ], - "keywords": [ - "google apis client", - "google api client", - "google apis", - "google api", - "google", - "google cloud platform", - "google cloud", - "cloud", - "google videointelligence", - "videointelligence", - "streaming video intelligence service", - "video intelligence service" - ], - "scripts": { - "clean": "gts clean", - "compile": "tsc -p . 
&& cp -r protos build/", - "compile-protos": "compileProtos src", - "docs": "jsdoc -c .jsdoc.js", - "predocs-test": "npm run docs", - "docs-test": "linkinator docs", - "fix": "gts fix", - "lint": "gts check", - "prepare": "npm run compile-protos && npm run compile", - "system-test": "c8 mocha build/system-test", - "test": "c8 mocha build/test" - }, - "dependencies": { - "google-gax": "^3.1.1" - }, - "devDependencies": { - "@types/mocha": "^9.1.0", - "@types/node": "^16.0.0", - "@types/sinon": "^10.0.8", - "c8": "^7.11.0", - "gts": "^3.1.0", - "jsdoc": "^3.6.7", - "jsdoc-fresh": "^1.1.1", - "jsdoc-region-tag": "^1.3.1", - "linkinator": "^3.0.0", - "mocha": "^9.1.4", - "null-loader": "^4.0.1", - "pack-n-play": "^1.0.0-2", - "sinon": "^13.0.0", - "ts-loader": "^9.2.6", - "typescript": "^4.5.5", - "webpack": "^5.67.0", - "webpack-cli": "^4.9.1" - }, - "engines": { - "node": ">=v12" - } -} diff --git a/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto deleted file mode 100644 index db039e67..00000000 --- a/owl-bot-staging/v1p3beta1/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto +++ /dev/null @@ -1,1090 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.cloud.videointelligence.v1p3beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P3Beta1"; -option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence"; -option java_multiple_files = true; -option java_outer_classname = "VideoIntelligenceServiceProto"; -option java_package = "com.google.cloud.videointelligence.v1p3beta1"; -option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1"; -option ruby_package = "Google::Cloud::VideoIntelligence::V1p3beta1"; - -// Service that implements the Video Intelligence API. -service VideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Performs asynchronous video annotation. Progress and results can be - // retrieved through the `google.longrunning.Operations` interface. - // `Operation.metadata` contains `AnnotateVideoProgress` (progress). - // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1p3beta1/videos:annotate" - body: "*" - }; - option (google.api.method_signature) = "input_uri,features"; - option (google.longrunning.operation_info) = { - response_type: "AnnotateVideoResponse" - metadata_type: "AnnotateVideoProgress" - }; - } -} - -// Service that implements streaming Video Intelligence API. 
-service StreamingVideoIntelligenceService { - option (google.api.default_host) = "videointelligence.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; - - // Performs video annotation with bidirectional streaming: emitting results - // while sending video/audio bytes. - // This method is only available via the gRPC API (not REST). - rpc StreamingAnnotateVideo(stream StreamingAnnotateVideoRequest) - returns (stream StreamingAnnotateVideoResponse) {} -} - -// Video annotation request. -message AnnotateVideoRequest { - // Input video location. Currently, only - // [Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported. URIs must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify - // multiple videos, a video URI may include wildcards in the `object-id`. - // Supported wildcards: '*' to match 0 or more characters; - // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` must be unset. - string input_uri = 1; - - // The video data bytes. - // If unset, the input video(s) should be specified via the `input_uri`. - // If set, `input_uri` must be unset. - bytes input_content = 6; - - // Required. Requested video annotation features. - repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; - - // Additional video context and/or feature-specific parameters. - VideoContext video_context = 3; - - // Optional. Location where the output (in JSON format) should be stored. - // Currently, only [Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported. 
These must be specified in the following format: - // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request - // URIs](https://cloud.google.com/storage/docs/request-endpoints). - string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Cloud region where annotation should take place. Supported cloud - // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - // region is specified, the region will be determined based on video file - // location. - string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; -} - -// Video context and/or feature-specific parameters. -message VideoContext { - // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video is - // treated as a single segment. - repeated VideoSegment segments = 1; - - // Config for LABEL_DETECTION. - LabelDetectionConfig label_detection_config = 2; - - // Config for SHOT_CHANGE_DETECTION. - ShotChangeDetectionConfig shot_change_detection_config = 3; - - // Config for EXPLICIT_CONTENT_DETECTION. - ExplicitContentDetectionConfig explicit_content_detection_config = 4; - - // Config for FACE_DETECTION. - FaceDetectionConfig face_detection_config = 5; - - // Config for SPEECH_TRANSCRIPTION. - SpeechTranscriptionConfig speech_transcription_config = 6; - - // Config for TEXT_DETECTION. - TextDetectionConfig text_detection_config = 8; - - // Config for PERSON_DETECTION. - PersonDetectionConfig person_detection_config = 11; - - // Config for OBJECT_TRACKING. - ObjectTrackingConfig object_tracking_config = 13; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. 
- FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. - VERY_LIKELY = 5; -} - -// Config for LABEL_DETECTION. -message LabelDetectionConfig { - // What labels should be detected with LABEL_DETECTION, in addition to - // video-level labels or segment-level labels. - // If unspecified, defaults to `SHOT_MODE`. - LabelDetectionMode label_detection_mode = 1; - - // Whether the video has been shot from a stationary (i.e., non-moving) - // camera. When set to true, might improve detection accuracy for moving - // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. - bool stationary_camera = 2; - - // Model to use for label detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 3; - - // The confidence threshold we perform filtering on the labels from - // frame-level detection. If not set, it is set to 0.4 by default. The valid - // range for this threshold is [0.1, 0.9]. Any value set outside of this - // range will be clipped. - // Note: For best results, follow the default threshold. We will update - // the default threshold everytime when we release a new model. - float frame_confidence_threshold = 4; - - // The confidence threshold we perform filtering on the labels from - // video-level and shot-level detections. If not set, it's set to 0.3 by - // default. The valid range for this threshold is [0.1, 0.9]. Any value set - // outside of this range will be clipped. - // Note: For best results, follow the default threshold. We will update - // the default threshold everytime when we release a new model. 
- float video_confidence_threshold = 5; -} - -// Streaming video annotation feature. -enum StreamingFeature { - // Unspecified. - STREAMING_FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - STREAMING_LABEL_DETECTION = 1; - - // Shot change detection. - STREAMING_SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - STREAMING_EXPLICIT_CONTENT_DETECTION = 3; - - // Object detection and tracking. - STREAMING_OBJECT_TRACKING = 4; - - // Action recognition based on AutoML model. - STREAMING_AUTOML_ACTION_RECOGNITION = 23; - - // Video classification based on AutoML model. - STREAMING_AUTOML_CLASSIFICATION = 21; - - // Object detection and tracking based on AutoML model. - STREAMING_AUTOML_OBJECT_TRACKING = 22; -} - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Human face detection. - FACE_DETECTION = 4; - - // Speech transcription. - SPEECH_TRANSCRIPTION = 6; - - // OCR text detection and tracking. - TEXT_DETECTION = 7; - - // Object detection and tracking. - OBJECT_TRACKING = 9; - - // Logo detection, tracking, and recognition. - LOGO_RECOGNITION = 12; - - // Celebrity recognition. - CELEBRITY_RECOGNITION = 13; - - // Person detection. - PERSON_DETECTION = 14; -} - -// Config for SHOT_CHANGE_DETECTION. -message ShotChangeDetectionConfig { - // Model to use for shot change detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for OBJECT_TRACKING. -message ObjectTrackingConfig { - // Model to use for object tracking. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for EXPLICIT_CONTENT_DETECTION. 
-message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for FACE_DETECTION. -message FaceDetectionConfig { - // Model to use for face detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; - - // Whether bounding boxes are included in the face annotation output. - bool include_bounding_boxes = 2; - - // Whether to enable face attributes detection, such as glasses, dark_glasses, - // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. - bool include_attributes = 5; -} - -// Config for PERSON_DETECTION. -message PersonDetectionConfig { - // Whether bounding boxes are included in the person detection annotation - // output. - bool include_bounding_boxes = 1; - - // Whether to enable pose landmarks detection. Ignored if - // 'include_bounding_boxes' is set to false. - bool include_pose_landmarks = 2; - - // Whether to enable person attributes detection, such as cloth color (black, - // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, - // etc. - // Ignored if 'include_bounding_boxes' is set to false. - bool include_attributes = 3; -} - -// Config for TEXT_DETECTION. -message TextDetectionConfig { - // Language hint can be specified if the language to be detected is known a - // priori. It can increase the accuracy of the detection. Language hint must - // be language code in BCP-47 format. - // - // Automatic language detection is performed if no hint is provided. - repeated string language_hints = 1; - - // Model to use for text detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 2; -} - -// Video segment. 
-message VideoSegment { - // Time-offset, relative to the beginning of the video, - // corresponding to the start of the segment (inclusive). - google.protobuf.Duration start_time_offset = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the end of the segment (inclusive). - google.protobuf.Duration end_time_offset = 2; -} - -// Video segment level annotation results for label detection. -message LabelSegment { - // Video segment where a label was detected. - VideoSegment segment = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Video frame level annotation results for label detection. -message LabelFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Confidence that the label is accurate. Range: [0, 1]. - float confidence = 2; -} - -// Detected entity from video analysis. -message Entity { - // Opaque entity ID. Some IDs may be available in - // [Google Knowledge Graph Search - // API](https://developers.google.com/knowledge-graph/). - string entity_id = 1; - - // Textual description, e.g., `Fixed-gear bicycle`. - string description = 2; - - // Language code for `description` in BCP-47 format. - string language_code = 3; -} - -// Label annotation. -message LabelAnnotation { - // Detected entity. - Entity entity = 1; - - // Common categories for the detected entity. - // For example, when the label is `Terrier`, the category is likely `dog`. And - // in some cases there might be more than one categories e.g., `Terrier` could - // also be a `pet`. - repeated Entity category_entities = 2; - - // All video segments where a label was detected. - repeated LabelSegment segments = 3; - - // All video frames where a label was detected. - repeated LabelFrame frames = 4; -} - -// Video frame level annotation results for explicit content. 
-message ExplicitContentFrame { - // Time-offset, relative to the beginning of the video, corresponding to the - // video frame for this location. - google.protobuf.Duration time_offset = 1; - - // Likelihood of the pornography content.. - Likelihood pornography_likelihood = 2; -} - -// Explicit content annotation (based on per-frame visual signals only). -// If no explicit content has been detected in a frame, no annotations are -// present for that frame. -message ExplicitContentAnnotation { - // All video frames where explicit content was detected. - repeated ExplicitContentFrame frames = 1; -} - -// Normalized bounding box. -// The normalized vertex coordinates are relative to the original image. -// Range: [0, 1]. -message NormalizedBoundingBox { - // Left X coordinate. - float left = 1; - - // Top Y coordinate. - float top = 2; - - // Right X coordinate. - float right = 3; - - // Bottom Y coordinate. - float bottom = 4; -} - -// For tracking related features. -// An object at time_offset with attributes, and located with -// normalized_bounding_box. -message TimestampedObject { - // Normalized Bounding box in a frame, where the object is located. - NormalizedBoundingBox normalized_bounding_box = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the video frame for this object. - google.protobuf.Duration time_offset = 2; - - // Optional. The attributes of the object in the bounding box. - repeated DetectedAttribute attributes = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The detected landmarks. - repeated DetectedLandmark landmarks = 4 - [(google.api.field_behavior) = OPTIONAL]; -} - -// A track of an object instance. -message Track { - // Video segment of a track. - VideoSegment segment = 1; - - // The object with timestamp and attributes per frame in the track. - repeated TimestampedObject timestamped_objects = 2; - - // Optional. Attributes in the track level. 
- repeated DetectedAttribute attributes = 3 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The confidence score of the tracked object. - float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; -} - -// A generic detected attribute represented by name in string format. -message DetectedAttribute { - // The name of the attribute, for example, glasses, dark_glasses, mouth_open. - // A full list of supported type names will be provided in the document. - string name = 1; - - // Detected attribute confidence. Range [0, 1]. - float confidence = 2; - - // Text value of the detection result. For example, the value for "HairColor" - // can be "black", "blonde", etc. - string value = 3; -} - -// Celebrity definition. -message Celebrity { - // The resource name of the celebrity. Have the format - // `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery. - // kg-mid is the id in Google knowledge graph, which is unique for the - // celebrity. - string name = 1; - - // The celebrity name. - string display_name = 2; - - // Textual description of additional information about the celebrity, if - // applicable. - string description = 3; -} - -// The annotation result of a celebrity face track. RecognizedCelebrity field -// could be empty if the face track does not have any matched celebrities. -message CelebrityTrack { - // The recognized celebrity with confidence score. - message RecognizedCelebrity { - // The recognized celebrity. - Celebrity celebrity = 1; - - // Recognition confidence. Range [0, 1]. - float confidence = 2; - } - - // Top N match of the celebrities for the face in this track. - repeated RecognizedCelebrity celebrities = 1; - - // A track of a person's face. - Track face_track = 3; -} - -// Celebrity recognition annotation per video. -message CelebrityRecognitionAnnotation { - // The tracks detected from the input video, including recognized celebrities - // and other detected faces in the video. 
- repeated CelebrityTrack celebrity_tracks = 1; -} - -// A generic detected landmark represented by name in string format and a 2D -// location. -message DetectedLandmark { - // The name of this landmark, for example, left_hand, right_shoulder. - string name = 1; - - // The 2D point of the detected landmark using the normalized image - // coordindate system. The normalized coordinates have the range from 0 to 1. - NormalizedVertex point = 2; - - // The confidence score of the detected landmark. Range [0, 1]. - float confidence = 3; -} - -// Face detection annotation. -message FaceDetectionAnnotation { - // The face tracks with attributes. - repeated Track tracks = 3; - - // The thumbnail of a person's face. - bytes thumbnail = 4; -} - -// Person detection annotation per video. -message PersonDetectionAnnotation { - // The detected tracks of a person. - repeated Track tracks = 1; -} - -// Annotation results for a single video. -message VideoAnnotationResults { - // Video file location in - // [Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Video segment on which the annotation is run. - VideoSegment segment = 10; - - // Topical label annotations on video level or user-specified segment level. - // There is exactly one element for each unique label. - repeated LabelAnnotation segment_label_annotations = 2; - - // Presence label annotations on video level or user-specified segment level. - // There is exactly one element for each unique label. Compared to the - // existing topical `segment_label_annotations`, this field presents more - // fine-grained, segment-level labels detected in video content and is made - // available only when the client sets `LabelDetectionConfig.model` to - // "builtin/latest" in the request. - repeated LabelAnnotation segment_presence_label_annotations = 23; - - // Topical label annotations on shot level. - // There is exactly one element for each unique label. 
- repeated LabelAnnotation shot_label_annotations = 3; - - // Presence label annotations on shot level. There is exactly one element for - // each unique label. Compared to the existing topical - // `shot_label_annotations`, this field presents more fine-grained, shot-level - // labels detected in video content and is made available only when the client - // sets `LabelDetectionConfig.model` to "builtin/latest" in the request. - repeated LabelAnnotation shot_presence_label_annotations = 24; - - // Label annotations on frame level. - // There is exactly one element for each unique label. - repeated LabelAnnotation frame_label_annotations = 4; - - // Face detection annotations. - repeated FaceDetectionAnnotation face_detection_annotations = 13; - - // Shot annotations. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 6; - - // Explicit content annotation. - ExplicitContentAnnotation explicit_annotation = 7; - - // Speech transcription. - repeated SpeechTranscription speech_transcriptions = 11; - - // OCR text detection and tracking. - // Annotations for list of detected text snippets. Each will have list of - // frame information associated with it. - repeated TextAnnotation text_annotations = 12; - - // Annotations for list of objects detected and tracked in video. - repeated ObjectTrackingAnnotation object_annotations = 14; - - // Annotations for list of logos detected, tracked and recognized in video. - repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; - - // Person detection annotations. - repeated PersonDetectionAnnotation person_detection_annotations = 20; - - // Celebrity recognition annotations. - CelebrityRecognitionAnnotation celebrity_recognition_annotations = 21; - - // If set, indicates an error. Note that for a single `AnnotateVideoRequest` - // some videos may succeed and some may fail. - google.rpc.Status error = 9; -} - -// Video annotation response. 
Included in the `response` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoResponse { - // Annotation results for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationResults annotation_results = 1; -} - -// Annotation progress for a single video. -message VideoAnnotationProgress { - // Video file location in - // [Cloud Storage](https://cloud.google.com/storage/). - string input_uri = 1; - - // Approximate percentage processed thus far. Guaranteed to be - // 100 when fully processed. - int32 progress_percent = 2; - - // Time when the request was received. - google.protobuf.Timestamp start_time = 3; - - // Time of the most recent update. - google.protobuf.Timestamp update_time = 4; - - // Specifies which feature is being tracked if the request contains more than - // one feature. - Feature feature = 5; - - // Specifies which segment is being tracked if the request contains more than - // one segment. - VideoSegment segment = 6; -} - -// Video annotation progress. Included in the `metadata` -// field of the `Operation` returned by the `GetOperation` -// call of the `google::longrunning::Operations` service. -message AnnotateVideoProgress { - // Progress metadata for all videos specified in `AnnotateVideoRequest`. - repeated VideoAnnotationProgress annotation_progress = 1; -} - -// Config for SPEECH_TRANSCRIPTION. -message SpeechTranscriptionConfig { - // Required. *Required* The language of the supplied audio as a - // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. - // Example: "en-US". - // See [Language Support](https://cloud.google.com/speech/docs/languages) - // for a list of the currently supported language codes. - string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - - // Optional. Maximum number of recognition hypotheses to be returned. 
- // Specifically, the maximum number of `SpeechRecognitionAlternative` messages - // within each `SpeechTranscription`. The server may return fewer than - // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will - // return a maximum of one. If omitted, will return a maximum of one. - int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set to `true`, the server will attempt to filter out - // profanities, replacing all but the initial character in each filtered word - // with asterisks, e.g. "f***". If set to `false` or omitted, profanities - // won't be filtered out. - bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If 'true', adds punctuation to recognition result hypotheses. - // This feature is only available in select languages. Setting this for - // requests in other languages has no effect at all. The default 'false' value - // does not add punctuation to result hypotheses. NOTE: "This is currently - // offered as an experimental service, complimentary to all users. In the - // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5 - [(google.api.field_behavior) = OPTIONAL]; - - // Optional. For file formats, such as MXF or MKV, supporting multiple audio - // tracks, specify up to two tracks. Default: track 0. - repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If 'true', enables speaker detection for each recognized word in - // the top alternative of the recognition result using a speaker_tag provided - // in the WordInfo. - // Note: When this is true, we send all the words from the beginning of the - // audio for the top alternative in every consecutive response. 
- // This is done in order to improve our speaker tags as our models learn to - // identify the speakers in the conversation over time. - bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If set, specifies the estimated number of speakers in the - // conversation. If not set, defaults to '2'. Ignored unless - // enable_speaker_diarization is set to true. - int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. If `true`, the top result includes a list of words and the - // confidence for those words. If `false`, no word-level confidence - // information is returned. The default is `false`. - bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; -} - -// Provides "hints" to the speech recognizer to favor specific words and phrases -// in the results. -message SpeechContext { - // Optional. A list of strings containing words and phrases "hints" so that - // the speech recognition is more likely to recognize them. This can be used - // to improve the accuracy for specific words and phrases, for example, if - // specific commands are typically spoken by the user. This can also be used - // to add additional words to the vocabulary of the recognizer. See - // [usage limits](https://cloud.google.com/speech/limits#content). - repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; -} - -// A speech recognition result corresponding to a portion of the audio. -message SpeechTranscription { - // May contain one or more recognition hypotheses (up to the maximum specified - // in `max_alternatives`). These alternatives are ordered in terms of - // accuracy, with the top (first) alternative being the most probable, as - // ranked by the recognizer. - repeated SpeechRecognitionAlternative alternatives = 1; - - // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) - // language tag of the language in this result. 
This language code was - // detected to have the most likelihood of being spoken in the audio. - string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Alternative hypotheses (a.k.a. n-best list). -message SpeechRecognitionAlternative { - // Transcript text representing the words that the user spoke. - string transcript = 1; - - // Output only. The confidence estimate between 0.0 and 1.0. A higher number - // indicates an estimated greater likelihood that the recognized words are - // correct. This field is set only for the top alternative. - // This field is not guaranteed to be accurate and users should not rely on it - // to be always provided. - // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A list of word-specific information for each recognized word. - // Note: When `enable_speaker_diarization` is set to true, you will see all - // the words from the beginning of the audio. - repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Word-specific information for recognized words. Word information is only -// included in the response when certain request parameters are set, such -// as `enable_word_time_offsets`. -message WordInfo { - // Time offset relative to the beginning of the audio, and - // corresponding to the start of the spoken word. This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. - google.protobuf.Duration start_time = 1; - - // Time offset relative to the beginning of the audio, and - // corresponding to the end of the spoken word. This field is only set if - // `enable_word_time_offsets=true` and only in the top hypothesis. This is an - // experimental feature and the accuracy of the time offset can vary. 
- google.protobuf.Duration end_time = 2; - - // The word corresponding to this set of information. - string word = 3; - - // Output only. The confidence estimate between 0.0 and 1.0. A higher number - // indicates an estimated greater likelihood that the recognized words are - // correct. This field is set only for the top alternative. - // This field is not guaranteed to be accurate and users should not rely on it - // to be always provided. - // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. A distinct integer value is assigned for every speaker within - // the audio. This field specifies which one of those speakers was detected to - // have spoken this word. Value ranges from 1 up to diarization_speaker_count, - // and is only set if speaker diarization is enabled. - int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// A vertex represents a 2D point in the image. -// NOTE: the normalized vertex coordinates are relative to the original image -// and range from 0 to 1. -message NormalizedVertex { - // X coordinate. - float x = 1; - - // Y coordinate. - float y = 2; -} - -// Normalized bounding polygon for text (that might not be aligned with axis). -// Contains list of the corner points in clockwise order starting from -// top-left corner. For example, for a rectangular bounding box: -// When the text is horizontal it might look like: -// 0----1 -// | | -// 3----2 -// -// When it's clockwise rotated 180 degrees around the top-left corner it -// becomes: -// 2----3 -// | | -// 1----0 -// -// and the vertex order will still be (0, 1, 2, 3). Note that values can be less -// than 0, or greater than 1 due to trignometric calculations for location of -// the box. -message NormalizedBoundingPoly { - // Normalized vertices of the bounding polygon. 
- repeated NormalizedVertex vertices = 1; -} - -// Video segment level annotation results for text detection. -message TextSegment { - // Video segment where a text snippet was detected. - VideoSegment segment = 1; - - // Confidence for the track of detected text. It is calculated as the highest - // over all frames where OCR detected text appears. - float confidence = 2; - - // Information related to the frames where OCR detected text appears. - repeated TextFrame frames = 3; -} - -// Video frame level annotation results for text annotation (OCR). -// Contains information regarding timestamp and bounding box locations for the -// frames containing detected OCR text snippets. -message TextFrame { - // Bounding polygon of the detected text for this frame. - NormalizedBoundingPoly rotated_bounding_box = 1; - - // Timestamp of this frame. - google.protobuf.Duration time_offset = 2; -} - -// Annotations related to one detected OCR text snippet. This will contain the -// corresponding text, confidence value, and frame level information for each -// detection. -message TextAnnotation { - // The detected text. - string text = 1; - - // All video segments where OCR detected text appears. - repeated TextSegment segments = 2; -} - -// Video frame level annotations for object detection and tracking. This field -// stores per frame location, time offset, and confidence. -message ObjectTrackingFrame { - // The normalized bounding box location of this object track for the frame. - NormalizedBoundingBox normalized_bounding_box = 1; - - // The timestamp of the frame in microseconds. - google.protobuf.Duration time_offset = 2; -} - -// Annotations corresponding to one tracked object. -message ObjectTrackingAnnotation { - // Different representation of tracking info in non-streaming batch - // and streaming modes. - oneof track_info { - // Non-streaming batch mode ONLY. - // Each object track corresponds to one video segment where it appears. 
- VideoSegment segment = 3; - - // Streaming mode ONLY. - // In streaming mode, we do not know the end time of a tracked object - // before it is completed. Hence, there is no VideoSegment info returned. - // Instead, we provide a unique identifiable integer track_id so that - // the customers can correlate the results of the ongoing - // ObjectTrackAnnotation of the same track_id over time. - int64 track_id = 5; - } - - // Entity to specify the object category that this track is labeled as. - Entity entity = 1; - - // Object category's labeling confidence of this track. - float confidence = 4; - - // Information corresponding to all frames where this object track appears. - // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame - // messages in frames. - // Streaming mode: it can only be one ObjectTrackingFrame message in frames. - repeated ObjectTrackingFrame frames = 2; -} - -// Annotation corresponding to one detected, tracked and recognized logo class. -message LogoRecognitionAnnotation { - // Entity category information to specify the logo class that all the logo - // tracks within this LogoRecognitionAnnotation are recognized as. - Entity entity = 1; - - // All logo tracks where the recognized logo appears. Each track corresponds - // to one logo instance appearing in consecutive frames. - repeated Track tracks = 2; - - // All video segments where the recognized logo appears. There might be - // multiple instances of the same logo class appearing in one VideoSegment. - repeated VideoSegment segments = 3; -} - -// The top-level message sent by the client for the `StreamingAnnotateVideo` -// method. Multiple `StreamingAnnotateVideoRequest` messages are sent. -// The first message must only contain a `StreamingVideoConfig` message. -// All subsequent messages must only contain `input_content` data. -message StreamingAnnotateVideoRequest { - // *Required* The streaming request, which is either a streaming config or - // video content. 
- oneof streaming_request { - // Provides information to the annotator, specifing how to process the - // request. The first `AnnotateStreamingVideoRequest` message must only - // contain a `video_config` message. - StreamingVideoConfig video_config = 1; - - // The video data to be annotated. Chunks of video data are sequentially - // sent in `StreamingAnnotateVideoRequest` messages. Except the initial - // `StreamingAnnotateVideoRequest` message containing only - // `video_config`, all subsequent `AnnotateStreamingVideoRequest` - // messages must only contain `input_content` field. - // Note: as with all bytes fields, protobuffers use a pure binary - // representation (not base64). - bytes input_content = 2; - } -} - -// Provides information to the annotator that specifies how to process the -// request. -message StreamingVideoConfig { - // Config for requested annotation feature. - oneof streaming_config { - // Config for STREAMING_SHOT_CHANGE_DETECTION. - StreamingShotChangeDetectionConfig shot_change_detection_config = 2; - - // Config for STREAMING_LABEL_DETECTION. - StreamingLabelDetectionConfig label_detection_config = 3; - - // Config for STREAMING_EXPLICIT_CONTENT_DETECTION. - StreamingExplicitContentDetectionConfig explicit_content_detection_config = - 4; - - // Config for STREAMING_OBJECT_TRACKING. - StreamingObjectTrackingConfig object_tracking_config = 5; - - // Config for STREAMING_AUTOML_ACTION_RECOGNITION. - StreamingAutomlActionRecognitionConfig automl_action_recognition_config = - 23; - - // Config for STREAMING_AUTOML_CLASSIFICATION. - StreamingAutomlClassificationConfig automl_classification_config = 21; - - // Config for STREAMING_AUTOML_OBJECT_TRACKING. - StreamingAutomlObjectTrackingConfig automl_object_tracking_config = 22; - } - - // Requested annotation feature. - StreamingFeature feature = 1; - - // Streaming storage option. By default: storage is disabled. 
- StreamingStorageConfig storage_config = 30; -} - -// `StreamingAnnotateVideoResponse` is the only message returned to the client -// by `StreamingAnnotateVideo`. A series of zero or more -// `StreamingAnnotateVideoResponse` messages are streamed back to the client. -message StreamingAnnotateVideoResponse { - // If set, returns a [google.rpc.Status][google.rpc.Status] message that - // specifies the error for the operation. - google.rpc.Status error = 1; - - // Streaming annotation results. - StreamingVideoAnnotationResults annotation_results = 2; - - // Google Cloud Storage(GCS) URI that stores annotation results of one - // streaming session in JSON format. - // It is the annotation_result_storage_directory - // from the request followed by '/cloud_project_number-session_id'. - string annotation_results_uri = 3; -} - -// Streaming annotation results corresponding to a portion of the video -// that is currently being processed. -message StreamingVideoAnnotationResults { - // Shot annotation results. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 1; - - // Label annotation results. - repeated LabelAnnotation label_annotations = 2; - - // Explicit content annotation results. - ExplicitContentAnnotation explicit_annotation = 3; - - // Object tracking results. - repeated ObjectTrackingAnnotation object_annotations = 4; -} - -// Config for STREAMING_SHOT_CHANGE_DETECTION. -message StreamingShotChangeDetectionConfig {} - -// Config for STREAMING_LABEL_DETECTION. -message StreamingLabelDetectionConfig { - // Whether the video has been captured from a stationary (i.e. non-moving) - // camera. When set to true, might improve detection accuracy for moving - // objects. Default: false. - bool stationary_camera = 1; -} - -// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. -message StreamingExplicitContentDetectionConfig {} - -// Config for STREAMING_OBJECT_TRACKING. 
-message StreamingObjectTrackingConfig {} - -// Config for STREAMING_AUTOML_ACTION_RECOGNITION. -message StreamingAutomlActionRecognitionConfig { - // Resource name of AutoML model. - // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` - string model_name = 1; -} - -// Config for STREAMING_AUTOML_CLASSIFICATION. -message StreamingAutomlClassificationConfig { - // Resource name of AutoML model. - // Format: - // `projects/{project_number}/locations/{location_id}/models/{model_id}` - string model_name = 1; -} - -// Config for STREAMING_AUTOML_OBJECT_TRACKING. -message StreamingAutomlObjectTrackingConfig { - // Resource name of AutoML model. - // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` - string model_name = 1; -} - -// Config for streaming storage option. -message StreamingStorageConfig { - // Enable streaming storage. Default: false. - bool enable_storage_annotation_result = 1; - - // Cloud Storage URI to store all annotation results for one client. Client - // should specify this field as the top-level storage directory. Annotation - // results of different sessions will be put into different sub-directories - // denoted by project_name and session_id. All sub-directories will be auto - // generated by program and will be made accessible to client in response - // proto. URIs must be specified in the following format: - // `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage - // bucket created by client and bucket permission shall also be configured - // properly. `object-id` can be arbitrary string that make sense to client. - // Other URI formats will return error and cause Cloud Storage write failure. 
- string annotation_result_storage_directory = 3; -} diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json deleted file mode 100644 index 3bde5e7a..00000000 --- a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/snippet_metadata.google.cloud.videointelligence.v1p3beta1.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "clientLibrary": { - "name": "nodejs-videointelligence", - "version": "0.1.0", - "language": "TYPESCRIPT", - "apis": [ - { - "id": "google.cloud.videointelligence.v1p3beta1", - "version": "v1p3beta1" - } - ] - }, - "snippets": [ - { - "regionTag": "videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async", - "title": "videointelligence streamingAnnotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs video annotation with bidirectional streaming: emitting results while sending video/audio bytes. 
This method is only available via the gRPC API (not REST).", - "canonical": true, - "file": "streaming_video_intelligence_service.streaming_annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 65, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "StreamingAnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.StreamingAnnotateVideo", - "async": true, - "parameters": [ - { - "name": "video_config", - "type": ".google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - } - ], - "resultType": ".google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse", - "client": { - "shortName": "StreamingVideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient" - }, - "method": { - "shortName": "StreamingAnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.StreamingAnnotateVideo", - "service": { - "shortName": "StreamingVideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService" - } - } - } - }, - { - "regionTag": "videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async", - "title": "videointelligence annotateVideo Sample", - "origin": "API_DEFINITION", - "description": " Performs asynchronous video annotation. Progress and results can be retrieved through the `google.longrunning.Operations` interface. `Operation.metadata` contains `AnnotateVideoProgress` (progress). 
`Operation.response` contains `AnnotateVideoResponse` (results).", - "canonical": true, - "file": "video_intelligence_service.annotate_video.js", - "language": "JAVASCRIPT", - "segments": [ - { - "start": 25, - "end": 92, - "type": "FULL" - } - ], - "clientMethod": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.AnnotateVideo", - "async": true, - "parameters": [ - { - "name": "input_uri", - "type": "TYPE_STRING" - }, - { - "name": "input_content", - "type": "TYPE_BYTES" - }, - { - "name": "features", - "type": "TYPE_ENUM[]" - }, - { - "name": "video_context", - "type": ".google.cloud.videointelligence.v1p3beta1.VideoContext" - }, - { - "name": "output_uri", - "type": "TYPE_STRING" - }, - { - "name": "location_id", - "type": "TYPE_STRING" - } - ], - "resultType": ".google.longrunning.Operation", - "client": { - "shortName": "VideoIntelligenceServiceClient", - "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient" - }, - "method": { - "shortName": "AnnotateVideo", - "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.AnnotateVideo", - "service": { - "shortName": "VideoIntelligenceService", - "fullName": "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService" - } - } - } - } - ] -} diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js deleted file mode 100644 index 2236cb17..00000000 --- a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main() { - // [START videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Provides information to the annotator, specifing how to process the - * request. The first `AnnotateStreamingVideoRequest` message must only - * contain a `video_config` message. - */ - // const videoConfig = {} - /** - * The video data to be annotated. Chunks of video data are sequentially - * sent in `StreamingAnnotateVideoRequest` messages. Except the initial - * `StreamingAnnotateVideoRequest` message containing only - * `video_config`, all subsequent `AnnotateStreamingVideoRequest` - * messages must only contain `input_content` field. - * Note: as with all bytes fields, protobuffers use a pure binary - * representation (not base64). 
- */ - // const inputContent = 'Buffer.from('string')' - - // Imports the Videointelligence library - const {StreamingVideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p3beta1; - - // Instantiates a client - const videointelligenceClient = new StreamingVideoIntelligenceServiceClient(); - - async function callStreamingAnnotateVideo() { - // Construct request - const request = { - }; - - // Run request - const stream = await videointelligenceClient.streamingAnnotateVideo(); - stream.on('data', (response) => { console.log(response) }); - stream.on('error', (err) => { throw(err) }); - stream.on('end', () => { /* API call completed */ }); - stream.write(request); - stream.end(); - } - - callStreamingAnnotateVideo(); - // [END videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js b/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js deleted file mode 100644 index 4f56a02a..00000000 --- a/owl-bot-staging/v1p3beta1/samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - - - -'use strict'; - -function main(features) { - // [START videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async] - /** - * TODO(developer): Uncomment these variables before running the sample. - */ - /** - * Input video location. Currently, only - * Cloud Storage (https://cloud.google.com/storage/) URIs are - * supported. URIs must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). To identify - * multiple videos, a video URI may include wildcards in the `object-id`. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` must be unset. - */ - // const inputUri = 'abc123' - /** - * The video data bytes. - * If unset, the input video(s) should be specified via the `input_uri`. - * If set, `input_uri` must be unset. - */ - // const inputContent = 'Buffer.from('string')' - /** - * Required. Requested video annotation features. - */ - // const features = 1234 - /** - * Additional video context and/or feature-specific parameters. - */ - // const videoContext = {} - /** - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only Cloud Storage (https://cloud.google.com/storage/) - * URIs are supported. These must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT google.rpc.Code.INVALID_ARGUMENT). 
For - * more information, see Request - * URIs (https://cloud.google.com/storage/docs/request-endpoints). - */ - // const outputUri = 'abc123' - /** - * Optional. Cloud region where annotation should take place. Supported cloud - * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - * region is specified, the region will be determined based on video file - * location. - */ - // const locationId = 'abc123' - - // Imports the Videointelligence library - const {VideoIntelligenceServiceClient} = require('@google-cloud/video-intelligence').v1p3beta1; - - // Instantiates a client - const videointelligenceClient = new VideoIntelligenceServiceClient(); - - async function callAnnotateVideo() { - // Construct request - const request = { - features, - }; - - // Run request - const [operation] = await videointelligenceClient.annotateVideo(request); - const [response] = await operation.promise(); - console.log(response); - } - - callAnnotateVideo(); - // [END videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async] -} - -process.on('unhandledRejection', err => { - console.error(err.message); - process.exitCode = 1; -}); -main(...process.argv.slice(2)); diff --git a/owl-bot-staging/v1p3beta1/src/index.ts b/owl-bot-staging/v1p3beta1/src/index.ts deleted file mode 100644 index 0b13624e..00000000 --- a/owl-bot-staging/v1p3beta1/src/index.ts +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as v1p3beta1 from './v1p3beta1'; -const StreamingVideoIntelligenceServiceClient = v1p3beta1.StreamingVideoIntelligenceServiceClient; -type StreamingVideoIntelligenceServiceClient = v1p3beta1.StreamingVideoIntelligenceServiceClient; -const VideoIntelligenceServiceClient = v1p3beta1.VideoIntelligenceServiceClient; -type VideoIntelligenceServiceClient = v1p3beta1.VideoIntelligenceServiceClient; -export {v1p3beta1, StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient}; -export default {v1p3beta1, StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient}; -import * as protos from '../protos/protos'; -export {protos} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json deleted file mode 100644 index eac12ff5..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/gapic_metadata.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "typescript", - "protoPackage": "google.cloud.videointelligence.v1p3beta1", - "libraryPackage": "@google-cloud/video-intelligence", - "services": { - "StreamingVideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "StreamingVideoIntelligenceServiceClient", - "rpcs": { - "StreamingAnnotateVideo": { - "methods": [ - "streamingAnnotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "StreamingVideoIntelligenceServiceClient", - "rpcs": {} - } - } - }, - "VideoIntelligenceService": { - "clients": { - "grpc": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": 
{ - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - }, - "grpc-fallback": { - "libraryClient": "VideoIntelligenceServiceClient", - "rpcs": { - "AnnotateVideo": { - "methods": [ - "annotateVideo" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts deleted file mode 100644 index a8ec5240..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/index.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -export {StreamingVideoIntelligenceServiceClient} from './streaming_video_intelligence_service_client'; -export {VideoIntelligenceServiceClient} from './video_intelligence_service_client'; diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts deleted file mode 100644 index 436e6858..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client.ts +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GoogleError} from 'google-gax'; - -import { PassThrough } from 'stream'; -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1p3beta1/streaming_video_intelligence_service_client_config.json`. - * This file defines retry strategy and timeouts for all API methods in this library. 
- */ -import * as gapicConfig from './streaming_video_intelligence_service_client_config.json'; - -const version = require('../../../package.json').version; - -/** - * Service that implements streaming Video Intelligence API. - * @class - * @memberof v1p3beta1 - */ -export class StreamingVideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - streamingVideoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of StreamingVideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. - * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. 
- * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof StreamingVideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. - if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? 
gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - // Some of the methods on this service provide streaming responses. - // Provide descriptors for these. - this.descriptors.stream = { - streamingAnnotateVideo: new this._gaxModule.StreamDescriptor(gax.StreamType.BIDI_STREAMING, opts.fallback === 'rest') - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.streamingVideoIntelligenceServiceStub) { - return this.streamingVideoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService. - this.streamingVideoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? 
- (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. - const streamingVideoIntelligenceServiceStubMethods = - ['streamingAnnotateVideo']; - for (const methodName of streamingVideoIntelligenceServiceStubMethods) { - const callPromise = this.streamingVideoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - if (methodName in this.descriptors.stream) { - const stream = new PassThrough(); - setImmediate(() => { - stream.emit('error', new GoogleError('The client has already been closed.')); - }); - return stream; - } - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.stream[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.streamingVideoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. 
- * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. - */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs video annotation with bidirectional streaming: emitting results - * while sending video/audio bytes. - * This method is only available via the gRPC API (not REST). - * - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Stream} - * An object stream which is both readable and writable. It accepts objects - * representing [StreamingAnnotateVideoRequest]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest} for write() method, and - * will emit objects representing [StreamingAnnotateVideoResponse]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse} on 'data' event asynchronously. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#bi-directional-streaming) - * for more details and examples. 
- * @example include:samples/generated/v1p3beta1/streaming_video_intelligence_service.streaming_annotate_video.js - * region_tag:videointelligence_v1p3beta1_generated_StreamingVideoIntelligenceService_StreamingAnnotateVideo_async - */ - streamingAnnotateVideo( - options?: CallOptions): - gax.CancellableStream { - this.initialize(); - return this.innerApiCalls.streamingAnnotateVideo(null, options); - } - - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. - */ - close(): Promise { - if (this.streamingVideoIntelligenceServiceStub && !this._terminated) { - return this.streamingVideoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json deleted file mode 100644 index b569dbe8..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_client_config.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "StreamingAnnotateVideo": { - "timeout_millis": 10800000, - "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } - } - } -} diff --git 
a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json deleted file mode 100644 index 85fbf375..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/streaming_video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - "../../protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto" -] diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts deleted file mode 100644 index a9deb59b..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client.ts +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -/* global window */ -import * as gax from 'google-gax'; -import {Callback, CallOptions, Descriptors, ClientOptions, GrpcClientOptions, LROperation} from 'google-gax'; - -import * as protos from '../../protos/protos'; -import jsonProtos = require('../../protos/protos.json'); -/** - * Client JSON configuration object, loaded from - * `src/v1p3beta1/video_intelligence_service_client_config.json`. 
- * This file defines retry strategy and timeouts for all API methods in this library. - */ -import * as gapicConfig from './video_intelligence_service_client_config.json'; -import { operationsProtos } from 'google-gax'; -const version = require('../../../package.json').version; - -/** - * Service that implements the Video Intelligence API. - * @class - * @memberof v1p3beta1 - */ -export class VideoIntelligenceServiceClient { - private _terminated = false; - private _opts: ClientOptions; - private _providedCustomServicePath: boolean; - private _gaxModule: typeof gax | typeof gax.fallback; - private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; - private _protos: {}; - private _defaults: {[method: string]: gax.CallSettings}; - auth: gax.GoogleAuth; - descriptors: Descriptors = { - page: {}, - stream: {}, - longrunning: {}, - batching: {}, - }; - warn: (code: string, message: string, warnType?: string) => void; - innerApiCalls: {[name: string]: Function}; - operationsClient: gax.OperationsClient; - videoIntelligenceServiceStub?: Promise<{[name: string]: Function}>; - - /** - * Construct an instance of VideoIntelligenceServiceClient. - * - * @param {object} [options] - The configuration object. - * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). - * The common options are: - * @param {object} [options.credentials] - Credentials object. - * @param {string} [options.credentials.client_email] - * @param {string} [options.credentials.private_key] - * @param {string} [options.email] - Account email address. Required when - * using a .pem or .p12 keyFilename. - * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or - * .p12 key downloaded from the Google Developers Console. If you provide - * a path to a JSON file, the projectId option below is not necessary. 
- * NOTE: .pem and .p12 require you to specify options.email as well. - * @param {number} [options.port] - The port on which to connect to - * the remote host. - * @param {string} [options.projectId] - The project ID from the Google - * Developer's Console, e.g. 'grape-spaceship-123'. We will also check - * the environment variable GCLOUD_PROJECT for your project ID. If your - * app is running in an environment which supports - * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, - * your project ID will be detected automatically. - * @param {string} [options.apiEndpoint] - The domain name of the - * API remote host. - * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. - * Follows the structure of {@link gapicConfig}. - * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. - * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. - * For more information, please check the - * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. - */ - constructor(opts?: ClientOptions) { - // Ensure that options include all the required fields. - const staticMembers = this.constructor as typeof VideoIntelligenceServiceClient; - const servicePath = opts?.servicePath || opts?.apiEndpoint || staticMembers.servicePath; - this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); - const port = opts?.port || staticMembers.port; - const clientConfig = opts?.clientConfig ?? {}; - const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); - opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); - - // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. 
- if (servicePath !== staticMembers.servicePath && !('scopes' in opts)) { - opts['scopes'] = staticMembers.scopes; - } - - // Choose either gRPC or proto-over-HTTP implementation of google-gax. - this._gaxModule = opts.fallback ? gax.fallback : gax; - - // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. - this._gaxGrpc = new this._gaxModule.GrpcClient(opts); - - // Save options to use in initialize() method. - this._opts = opts; - - // Save the auth object to the client, for use by other methods. - this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); - - // Set useJWTAccessWithScope on the auth object. - this.auth.useJWTAccessWithScope = true; - - // Set defaultServicePath on the auth object. - this.auth.defaultServicePath = staticMembers.servicePath; - - // Set the default scopes in auth client if needed. - if (servicePath === staticMembers.servicePath) { - this.auth.defaultScopes = staticMembers.scopes; - } - - // Determine the client header string. - const clientHeader = [ - `gax/${this._gaxModule.version}`, - `gapic/${version}`, - ]; - if (typeof process !== 'undefined' && 'versions' in process) { - clientHeader.push(`gl-node/${process.versions.node}`); - } else { - clientHeader.push(`gl-web/${this._gaxModule.version}`); - } - if (!opts.fallback) { - clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); - } else if (opts.fallback === 'rest' ) { - clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); - } - if (opts.libName && opts.libVersion) { - clientHeader.push(`${opts.libName}/${opts.libVersion}`); - } - // Load the applicable protos. - this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); - - const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a - // an Operation object that allows for tracking of the operation, - // rather than holding a request open. 
- const lroOptions: GrpcClientOptions = { - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined - }; - if (opts.fallback === 'rest') { - lroOptions.protoJson = protoFilesRoot; - lroOptions.httpRules = [{selector: 'google.longrunning.Operations.ListOperations',get: '/v1p3beta1/{name=projects/*/locations/*}/operations',},{selector: 'google.longrunning.Operations.GetOperation',get: '/v1p3beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{get: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.DeleteOperation',delete: '/v1p3beta1/{name=projects/*/locations/*/operations/*}',additional_bindings: [{delete: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}',}], - },{selector: 'google.longrunning.Operations.CancelOperation',post: '/v1p3beta1/{name=projects/*/locations/*/operations/*}:cancel',body: '*',additional_bindings: [{post: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}:cancel',}], - }]; - } - this.operationsClient = this._gaxModule.lro(lroOptions).operationsClient(opts); - const annotateVideoResponse = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse') as gax.protobuf.Type; - const annotateVideoMetadata = protoFilesRoot.lookup( - '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress') as gax.protobuf.Type; - - this.descriptors.longrunning = { - annotateVideo: new this._gaxModule.LongrunningDescriptor( - this.operationsClient, - annotateVideoResponse.decode.bind(annotateVideoResponse), - annotateVideoMetadata.decode.bind(annotateVideoMetadata)) - }; - - // Put together the default options sent with requests. 
- this._defaults = this._gaxGrpc.constructSettings( - 'google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService', gapicConfig as gax.ClientConfig, - opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); - - // Set up a dictionary of "inner API calls"; the core implementation - // of calling the API is handled in `google-gax`, with this code - // merely providing the destination and request information. - this.innerApiCalls = {}; - - // Add a warn function to the client constructor so it can be easily tested. - this.warn = gax.warn; - } - - /** - * Initialize the client. - * Performs asynchronous operations (such as authentication) and prepares the client. - * This function will be called automatically when any class method is called for the - * first time, but if you need to initialize it before calling an actual method, - * feel free to call initialize() directly. - * - * You can await on this method if you want to make sure the client is initialized. - * - * @returns {Promise} A promise that resolves to an authenticated service stub. - */ - initialize() { - // If the client stub promise is already initialized, return immediately. - if (this.videoIntelligenceServiceStub) { - return this.videoIntelligenceServiceStub; - } - - // Put together the "service stub" for - // google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService. - this.videoIntelligenceServiceStub = this._gaxGrpc.createStub( - this._opts.fallback ? - (this._protos as protobuf.Root).lookupService('google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService') : - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (this._protos as any).google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService, - this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; - - // Iterate over each of the methods that the service provides - // and create an API call method for each. 
- const videoIntelligenceServiceStubMethods = - ['annotateVideo']; - for (const methodName of videoIntelligenceServiceStubMethods) { - const callPromise = this.videoIntelligenceServiceStub.then( - stub => (...args: Array<{}>) => { - if (this._terminated) { - return Promise.reject('The client has already been closed.'); - } - const func = stub[methodName]; - return func.apply(stub, args); - }, - (err: Error|null|undefined) => () => { - throw err; - }); - - const descriptor = - this.descriptors.longrunning[methodName] || - undefined; - const apiCall = this._gaxModule.createApiCall( - callPromise, - this._defaults[methodName], - descriptor - ); - - this.innerApiCalls[methodName] = apiCall; - } - - return this.videoIntelligenceServiceStub; - } - - /** - * The DNS address for this API service. - * @returns {string} The DNS address for this service. - */ - static get servicePath() { - return 'videointelligence.googleapis.com'; - } - - /** - * The DNS address for this API service - same as servicePath(), - * exists for compatibility reasons. - * @returns {string} The DNS address for this service. - */ - static get apiEndpoint() { - return 'videointelligence.googleapis.com'; - } - - /** - * The port for this API service. - * @returns {number} The default port for this service. - */ - static get port() { - return 443; - } - - /** - * The scopes needed to make gRPC calls for every method defined - * in this service. - * @returns {string[]} List of default scopes. - */ - static get scopes() { - return [ - 'https://www.googleapis.com/auth/cloud-platform' - ]; - } - - getProjectId(): Promise; - getProjectId(callback: Callback): void; - /** - * Return the project ID used by this class. - * @returns {Promise} A promise that resolves to string containing the project ID. 
- */ - getProjectId(callback?: Callback): - Promise|void { - if (callback) { - this.auth.getProjectId(callback); - return; - } - return this.auth.getProjectId(); - } - - // ------------------- - // -- Service calls -- - // ------------------- - -/** - * Performs asynchronous video annotation. Progress and results can be - * retrieved through the `google.longrunning.Operations` interface. - * `Operation.metadata` contains `AnnotateVideoProgress` (progress). - * `Operation.response` contains `AnnotateVideoResponse` (results). - * - * @param {Object} request - * The request object that will be sent. - * @param {string} request.inputUri - * Input video location. Currently, only - * [Cloud Storage](https://cloud.google.com/storage/) URIs are - * supported. URIs must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify - * multiple videos, a video URI may include wildcards in the `object-id`. - * Supported wildcards: '*' to match 0 or more characters; - * '?' to match 1 character. If unset, the input video should be embedded - * in the request as `input_content`. If set, `input_content` must be unset. - * @param {Buffer} request.inputContent - * The video data bytes. - * If unset, the input video(s) should be specified via the `input_uri`. - * If set, `input_uri` must be unset. - * @param {number[]} request.features - * Required. Requested video annotation features. - * @param {google.cloud.videointelligence.v1p3beta1.VideoContext} request.videoContext - * Additional video context and/or feature-specific parameters. - * @param {string} [request.outputUri] - * Optional. Location where the output (in JSON format) should be stored. - * Currently, only [Cloud Storage](https://cloud.google.com/storage/) - * URIs are supported. 
These must be specified in the following format: - * `gs://bucket-id/object-id` (other URI formats return - * {@link google.rpc.Code.INVALID_ARGUMENT|google.rpc.Code.INVALID_ARGUMENT}). For - * more information, see [Request - * URIs](https://cloud.google.com/storage/docs/request-endpoints). - * @param {string} [request.locationId] - * Optional. Cloud region where annotation should take place. Supported cloud - * regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no - * region is specified, the region will be determined based on video file - * location. - * @param {object} [options] - * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. - * @returns {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * a long running operation. Its `promise()` method returns a promise - * you can `await` for. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. 
- * @example include:samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest, - options?: CallOptions): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest, - options: CallOptions, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest, - callback: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): void; - annotateVideo( - request?: protos.google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest, - optionsOrCallback?: CallOptions|Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>, - callback?: Callback< - LROperation, - protos.google.longrunning.IOperation|null|undefined, - {}|null|undefined>): - Promise<[ - LROperation, - protos.google.longrunning.IOperation|undefined, {}|undefined - ]>|void { - request = request || {}; - let options: CallOptions; - if (typeof optionsOrCallback === 'function' && callback === undefined) { - callback = optionsOrCallback; - options = {}; - } - else { - options = optionsOrCallback as CallOptions; - } - options = options || {}; - options.otherArgs = options.otherArgs || {}; - options.otherArgs.headers = options.otherArgs.headers || {}; - this.initialize(); - return this.innerApiCalls.annotateVideo(request, options, callback); - } -/** - * Check the status of the long running operation returned by `annotateVideo()`. - * @param {String} name - * The operation name that will be passed. 
- * @returns {Promise} - The promise which resolves to an object. - * The decoded operation object has result and metadata field to get information from. - * Please see the - * [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations) - * for more details and examples. - * @example include:samples/generated/v1p3beta1/video_intelligence_service.annotate_video.js - * region_tag:videointelligence_v1p3beta1_generated_VideoIntelligenceService_AnnotateVideo_async - */ - async checkAnnotateVideoProgress(name: string): Promise>{ - const request = new operationsProtos.google.longrunning.GetOperationRequest({name}); - const [operation] = await this.operationsClient.getOperation(request); - const decodeOperation = new gax.Operation(operation, this.descriptors.longrunning.annotateVideo, gax.createDefaultBackoffSettings()); - return decodeOperation as LROperation; - } - - /** - * Terminate the gRPC channel and close the client. - * - * The client will no longer be usable and all future behavior is undefined. - * @returns {Promise} A promise that resolves when the client is closed. 
- */ - close(): Promise { - if (this.videoIntelligenceServiceStub && !this._terminated) { - return this.videoIntelligenceServiceStub.then(stub => { - this._terminated = true; - stub.close(); - this.operationsClient.close(); - }); - } - return Promise.resolve(); - } -} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json deleted file mode 100644 index c9796e48..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_client_config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "interfaces": { - "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService": { - "retry_codes": { - "non_idempotent": [], - "idempotent": [ - "DEADLINE_EXCEEDED", - "UNAVAILABLE" - ] - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - }, - "44183339c3ec233f7d8e740ee644b7ceb1a77fc3": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.5, - "max_retry_delay_millis": 120000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 - } - }, - "methods": { - "AnnotateVideo": { - "timeout_millis": 600000, - "retry_codes_name": "idempotent", - "retry_params_name": "44183339c3ec233f7d8e740ee644b7ceb1a77fc3" - } - } - } - } -} diff --git a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json b/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json deleted file mode 100644 index 85fbf375..00000000 --- a/owl-bot-staging/v1p3beta1/src/v1p3beta1/video_intelligence_service_proto_list.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - 
"../../protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto" -] diff --git a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js deleted file mode 100644 index aafb91c9..00000000 --- a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.js +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - - -/* eslint-disable node/no-missing-require, no-unused-vars */ -const videointelligence = require('@google-cloud/video-intelligence'); - -function main() { - const streamingVideoIntelligenceServiceClient = new videointelligence.StreamingVideoIntelligenceServiceClient(); - const videoIntelligenceServiceClient = new videointelligence.VideoIntelligenceServiceClient(); -} - -main(); diff --git a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts b/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts deleted file mode 100644 index 94514c94..00000000 --- a/owl-bot-staging/v1p3beta1/system-test/fixtures/sample/src/index.ts +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import {StreamingVideoIntelligenceServiceClient, VideoIntelligenceServiceClient} from '@google-cloud/video-intelligence'; - -// check that the client class type name can be used -function doStuffWithStreamingVideoIntelligenceServiceClient(client: StreamingVideoIntelligenceServiceClient) { - client.close(); -} -function doStuffWithVideoIntelligenceServiceClient(client: VideoIntelligenceServiceClient) { - client.close(); -} - -function main() { - // check that the client instance can be created - const streamingVideoIntelligenceServiceClient = new StreamingVideoIntelligenceServiceClient(); - doStuffWithStreamingVideoIntelligenceServiceClient(streamingVideoIntelligenceServiceClient); - // check that the client instance can be created - const videoIntelligenceServiceClient = new VideoIntelligenceServiceClient(); - doStuffWithVideoIntelligenceServiceClient(videoIntelligenceServiceClient); -} - -main(); diff --git a/owl-bot-staging/v1p3beta1/system-test/install.ts b/owl-bot-staging/v1p3beta1/system-test/install.ts deleted file mode 100644 index 8ec45222..00000000 --- a/owl-bot-staging/v1p3beta1/system-test/install.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import { packNTest } from 'pack-n-play'; -import { readFileSync } from 'fs'; -import { describe, it } from 'mocha'; - -describe('📦 pack-n-play test', () => { - - it('TypeScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'TypeScript user can use the type definitions', - ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() - } - }; - await packNTest(options); - }); - - it('JavaScript code', async function() { - this.timeout(300000); - const options = { - packageDir: process.cwd(), - sample: { - description: 'JavaScript user can use the library', - ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() - } - }; - await packNTest(options); - }); - -}); diff --git a/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts b/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts deleted file mode 100644 index 2a1b730c..00000000 --- a/owl-bot-staging/v1p3beta1/test/gapic_streaming_video_intelligence_service_v1p3beta1.ts +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. 
** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as streamingvideointelligenceserviceModule from '../src'; - -import {PassThrough} from 'stream'; - -import {protobuf} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubBidiStreamingCall(response?: ResponseType, error?: Error) { - const transformStub = error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); - const mockStream = new PassThrough({ - objectMode: true, - transform: transformStub, - }); - return sinon.stub().returns(mockStream); -} - -describe('v1p3beta1.StreamingVideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - 
}); - - it('has initialize method and supports deferred initialization', async () => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.streamingVideoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.streamingVideoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.streamingVideoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.streamingVideoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new 
streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('streamingAnnotateVideo', () => { - it('invokes streamingAnnotateVideo without error', async () => { - const client = new streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest()); - const expectedResponse = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse()); - client.innerApiCalls.streamingAnnotateVideo = stubBidiStreamingCall(expectedResponse); - const stream = client.streamingAnnotateVideo(); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - stream.write(request); - stream.end(); - }); - const response = await promise; - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.streamingAnnotateVideo as SinonStub) - .getCall(0).calledWith(null)); - assert.deepStrictEqual(((stream as unknown as PassThrough) - ._transform as SinonStub).getCall(0).args[0], request); - }); - - it('invokes streamingAnnotateVideo with error', async () => { - const client = new 
streamingvideointelligenceserviceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest()); - const expectedError = new Error('expected'); - client.innerApiCalls.streamingAnnotateVideo = stubBidiStreamingCall(undefined, expectedError); - const stream = client.streamingAnnotateVideo(); - const promise = new Promise((resolve, reject) => { - stream.on('data', (response: protos.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) => { - resolve(response); - }); - stream.on('error', (err: Error) => { - reject(err); - }); - stream.write(request); - stream.end(); - }); - await assert.rejects(promise, expectedError); - assert((client.innerApiCalls.streamingAnnotateVideo as SinonStub) - .getCall(0).calledWith(null)); - assert.deepStrictEqual(((stream as unknown as PassThrough) - ._transform as SinonStub).getCall(0).args[0], request); - }); - }); -}); diff --git a/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts b/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts deleted file mode 100644 index fc99736f..00000000 --- a/owl-bot-staging/v1p3beta1/test/gapic_video_intelligence_service_v1p3beta1.ts +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// ** This file is automatically generated by gapic-generator-typescript. ** -// ** https://github.com/googleapis/gapic-generator-typescript ** -// ** All changes to this file may be overwritten. ** - -import * as protos from '../protos/protos'; -import * as assert from 'assert'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; -import { describe, it } from 'mocha'; -import * as videointelligenceserviceModule from '../src'; - -import {protobuf, LROperation, operationsProtos} from 'google-gax'; - -function generateSampleMessage(instance: T) { - const filledObject = (instance.constructor as typeof protobuf.Message) - .toObject(instance as protobuf.Message, {defaults: true}); - return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; -} - -function stubSimpleCall(response?: ResponseType, error?: Error) { - return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); -} - -function stubLongRunningCall(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? sinon.stub().rejects(callError) : sinon.stub().resolves([mockOperation]); -} - -function stubLongRunningCallWithCallback(response?: ResponseType, callError?: Error, lroError?: Error) { - const innerStub = lroError ? sinon.stub().rejects(lroError) : sinon.stub().resolves([response]); - const mockOperation = { - promise: innerStub, - }; - return callError ? 
sinon.stub().callsArgWith(2, callError) : sinon.stub().callsArgWith(2, null, mockOperation); -} - -describe('v1p3beta1.VideoIntelligenceServiceClient', () => { - it('has servicePath', () => { - const servicePath = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.servicePath; - assert(servicePath); - }); - - it('has apiEndpoint', () => { - const apiEndpoint = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.apiEndpoint; - assert(apiEndpoint); - }); - - it('has port', () => { - const port = videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient.port; - assert(port); - assert(typeof port === 'number'); - }); - - it('should create a client with no option', () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient(); - assert(client); - }); - - it('should create a client with gRPC fallback', () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - fallback: true, - }); - assert(client); - }); - - it('has initialize method and supports deferred initialization', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - await client.initialize(); - assert(client.videoIntelligenceServiceStub); - }); - - it('has close method for the initialized client', done => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - assert(client.videoIntelligenceServiceStub); - client.close().then(() => { - done(); - }); - }); - - it('has close method for the non-initialized client', done => { - const client = new 
videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - assert.strictEqual(client.videoIntelligenceServiceStub, undefined); - client.close().then(() => { - done(); - }); - }); - - it('has getProjectId method', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); - const result = await client.getProjectId(); - assert.strictEqual(result, fakeProjectId); - assert((client.auth.getProjectId as SinonStub).calledWithExactly()); - }); - - it('has getProjectId method with callback', async () => { - const fakeProjectId = 'fake-project-id'; - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); - const promise = new Promise((resolve, reject) => { - client.getProjectId((err?: Error|null, projectId?: string|null) => { - if (err) { - reject(err); - } else { - resolve(projectId); - } - }); - }); - const result = await promise; - assert.strictEqual(result, fakeProjectId); - }); - - describe('annotateVideo', () => { - it('invokes annotateVideo without error', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new 
protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCall(expectedResponse); - const [operation] = await client.annotateVideo(request); - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo without error using callback', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedResponse = generateSampleMessage(new protos.google.longrunning.Operation()); - client.innerApiCalls.annotateVideo = stubLongRunningCallWithCallback(expectedResponse); - const promise = new Promise((resolve, reject) => { - client.annotateVideo( - request, - (err?: Error|null, - result?: LROperation|null - ) => { - if (err) { - reject(err); - } else { - resolve(result); - } - }); - }); - const operation = await promise as LROperation; - const [response] = await operation.promise(); - assert.deepStrictEqual(response, expectedResponse); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions /*, callback defined above */)); - }); - - it('invokes annotateVideo with call error', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: 
{headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, expectedError); - await assert.rejects(client.annotateVideo(request), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes annotateVideo with LRO error', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const request = generateSampleMessage(new protos.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest()); - const expectedOptions = {otherArgs: {headers: {}}};; - const expectedError = new Error('expected'); - client.innerApiCalls.annotateVideo = stubLongRunningCall(undefined, undefined, expectedError); - const [operation] = await client.annotateVideo(request); - await assert.rejects(operation.promise(), expectedError); - assert((client.innerApiCalls.annotateVideo as SinonStub) - .getCall(0).calledWith(request, expectedOptions, undefined)); - }); - - it('invokes checkAnnotateVideoProgress without error', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedResponse = generateSampleMessage(new operationsProtos.google.longrunning.Operation()); - expectedResponse.name = 'test'; - expectedResponse.response = {type_url: 'url', value: Buffer.from('')}; - expectedResponse.metadata = {type_url: 'url', value: Buffer.from('')} - - client.operationsClient.getOperation = stubSimpleCall(expectedResponse); - const decodedOperation = await client.checkAnnotateVideoProgress(expectedResponse.name); - assert.deepStrictEqual(decodedOperation.name, expectedResponse.name); - 
assert(decodedOperation.metadata); - assert((client.operationsClient.getOperation as SinonStub).getCall(0)); - }); - - it('invokes checkAnnotateVideoProgress with error', async () => { - const client = new videointelligenceserviceModule.v1p3beta1.VideoIntelligenceServiceClient({ - credentials: {client_email: 'bogus', private_key: 'bogus'}, - projectId: 'bogus', - }); - client.initialize(); - const expectedError = new Error('expected'); - - client.operationsClient.getOperation = stubSimpleCall(undefined, expectedError); - await assert.rejects(client.checkAnnotateVideoProgress(''), expectedError); - assert((client.operationsClient.getOperation as SinonStub) - .getCall(0)); - }); - }); -}); diff --git a/owl-bot-staging/v1p3beta1/tsconfig.json b/owl-bot-staging/v1p3beta1/tsconfig.json deleted file mode 100644 index c78f1c88..00000000 --- a/owl-bot-staging/v1p3beta1/tsconfig.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "extends": "./node_modules/gts/tsconfig-google.json", - "compilerOptions": { - "rootDir": ".", - "outDir": "build", - "resolveJsonModule": true, - "lib": [ - "es2018", - "dom" - ] - }, - "include": [ - "src/*.ts", - "src/**/*.ts", - "test/*.ts", - "test/**/*.ts", - "system-test/*.ts" - ] -} diff --git a/owl-bot-staging/v1p3beta1/webpack.config.js b/owl-bot-staging/v1p3beta1/webpack.config.js deleted file mode 100644 index 9657601b..00000000 --- a/owl-bot-staging/v1p3beta1/webpack.config.js +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -const path = require('path'); - -module.exports = { - entry: './src/index.ts', - output: { - library: 'videointelligence', - filename: './videointelligence.js', - }, - node: { - child_process: 'empty', - fs: 'empty', - crypto: 'empty', - }, - resolve: { - alias: { - '../../../package.json': path.resolve(__dirname, 'package.json'), - }, - extensions: ['.js', '.json', '.ts'], - }, - module: { - rules: [ - { - test: /\.tsx?$/, - use: 'ts-loader', - exclude: /node_modules/ - }, - { - test: /node_modules[\\/]@grpc[\\/]grpc-js/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]grpc/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]retry-request/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]https?-proxy-agent/, - use: 'null-loader' - }, - { - test: /node_modules[\\/]gtoken/, - use: 'null-loader' - }, - ], - }, - mode: 'production', -}; diff --git a/src/v1/video_intelligence_service_client.ts b/src/v1/video_intelligence_service_client.ts index 05f2d234..392839d0 100644 --- a/src/v1/video_intelligence_service_client.ts +++ b/src/v1/video_intelligence_service_client.ts @@ -23,6 +23,7 @@ import { CallOptions, Descriptors, ClientOptions, + GrpcClientOptions, LROperation, } from 'google-gax'; @@ -67,7 +68,7 @@ export class VideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. 
* @param {string} [options.credentials.client_email] @@ -90,11 +91,10 @@ export class VideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. @@ -159,16 +159,51 @@ export class VideoIntelligenceServiceClient { this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a // an Operation object that allows for tracking of the operation, // rather than holding a request open. - + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined, + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [ + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1/{name=projects/*/locations/*/operations/*}:cancel', + body: '*', + additional_bindings: [ + { + post: '/v1/operations/{name=projects/*/locations/*/operations/*}:cancel', + }, + ], + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + delete: + '/v1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + {get: '/v1/operations/{name=projects/*/locations/*/operations/*}'}, + ], + }, + { + selector: 'google.longrunning.Operations.ListOperations', + get: '/v1/{name=projects/*/locations/*}/operations', + }, + ]; + } this.operationsClient = this._gaxModule - .lro({ - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, - }) + .lro(lroOptions) .operationsClient(opts); const annotateVideoResponse = protoFilesRoot.lookup( '.google.cloud.videointelligence.v1.AnnotateVideoResponse' diff --git a/src/v1beta2/video_intelligence_service_client.ts b/src/v1beta2/video_intelligence_service_client.ts index ef962887..5c31f108 100644 --- a/src/v1beta2/video_intelligence_service_client.ts +++ b/src/v1beta2/video_intelligence_service_client.ts @@ -23,6 +23,7 @@ import { CallOptions, Descriptors, ClientOptions, + GrpcClientOptions, LROperation, } from 'google-gax'; @@ -67,7 +68,7 @@ export class VideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). 
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. * @param {string} [options.credentials.client_email] @@ -90,11 +91,10 @@ export class VideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. @@ -159,16 +159,53 @@ export class VideoIntelligenceServiceClient { this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a // an Operation object that allows for tracking of the operation, // rather than holding a request open. - + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined, + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [ + { + selector: 'google.longrunning.Operations.ListOperations', + get: '/v1beta2/{name=projects/*/locations/*}/operations', + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1beta2/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + get: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1beta2/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + delete: + '/v1beta2/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1beta2/{name=projects/*/locations/*/operations/*}:cancel', + body: '*', + additional_bindings: [ + { + post: '/v1beta2/operations/{name=projects/*/locations/*/operations/*}:cancel', + }, + ], + }, + ]; + } this.operationsClient = this._gaxModule - .lro({ - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, - }) + .lro(lroOptions) .operationsClient(opts); const annotateVideoResponse = protoFilesRoot.lookup( '.google.cloud.videointelligence.v1beta2.AnnotateVideoResponse' diff --git a/src/v1p1beta1/video_intelligence_service_client.ts b/src/v1p1beta1/video_intelligence_service_client.ts index ee682fff..c4547443 100644 --- a/src/v1p1beta1/video_intelligence_service_client.ts +++ b/src/v1p1beta1/video_intelligence_service_client.ts @@ -23,6 +23,7 @@ import { CallOptions, Descriptors, ClientOptions, + GrpcClientOptions, LROperation, } from 'google-gax'; @@ -67,7 +68,7 @@ export class VideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. 
* The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. * @param {string} [options.credentials.client_email] @@ -90,11 +91,10 @@ export class VideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. @@ -159,16 +159,53 @@ export class VideoIntelligenceServiceClient { this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a // an Operation object that allows for tracking of the operation, // rather than holding a request open. - + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined, + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [ + { + selector: 'google.longrunning.Operations.ListOperations', + get: '/v1p1beta1/{name=projects/*/locations/*}/operations', + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1p1beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + get: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1p1beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + delete: + '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1p1beta1/{name=projects/*/locations/*/operations/*}:cancel', + body: '*', + additional_bindings: [ + { + post: '/v1p1beta1/operations/{name=projects/*/locations/*/operations/*}:cancel', + }, + ], + }, + ]; + } this.operationsClient = this._gaxModule - .lro({ - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, - }) + .lro(lroOptions) .operationsClient(opts); const annotateVideoResponse = protoFilesRoot.lookup( '.google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse' diff --git a/src/v1p2beta1/video_intelligence_service_client.ts b/src/v1p2beta1/video_intelligence_service_client.ts index 2ef98b4f..6a7d3777 100644 --- a/src/v1p2beta1/video_intelligence_service_client.ts +++ b/src/v1p2beta1/video_intelligence_service_client.ts @@ -23,6 +23,7 @@ import { CallOptions, Descriptors, ClientOptions, + GrpcClientOptions, LROperation, } from 'google-gax'; @@ -67,7 +68,7 @@ export class VideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. 
* The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. * @param {string} [options.credentials.client_email] @@ -90,11 +91,10 @@ export class VideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. @@ -159,16 +159,53 @@ export class VideoIntelligenceServiceClient { this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a // an Operation object that allows for tracking of the operation, // rather than holding a request open. - + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined, + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [ + { + selector: 'google.longrunning.Operations.ListOperations', + get: '/v1p2beta1/{name=projects/*/locations/*}/operations', + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1p2beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + get: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1p2beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + delete: + '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1p2beta1/{name=projects/*/locations/*/operations/*}:cancel', + body: '*', + additional_bindings: [ + { + post: '/v1p2beta1/operations/{name=projects/*/locations/*/operations/*}:cancel', + }, + ], + }, + ]; + } this.operationsClient = this._gaxModule - .lro({ - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, - }) + .lro(lroOptions) .operationsClient(opts); const annotateVideoResponse = protoFilesRoot.lookup( '.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse' diff --git a/src/v1p3beta1/streaming_video_intelligence_service_client.ts b/src/v1p3beta1/streaming_video_intelligence_service_client.ts index a8190dae..3e21b7fd 100644 --- a/src/v1p3beta1/streaming_video_intelligence_service_client.ts +++ b/src/v1p3beta1/streaming_video_intelligence_service_client.ts @@ -67,7 +67,7 @@ export class StreamingVideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). 
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. * @param {string} [options.credentials.client_email] @@ -90,11 +90,10 @@ export class StreamingVideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. diff --git a/src/v1p3beta1/video_intelligence_service_client.ts b/src/v1p3beta1/video_intelligence_service_client.ts index 7475f0fe..61a566e7 100644 --- a/src/v1p3beta1/video_intelligence_service_client.ts +++ b/src/v1p3beta1/video_intelligence_service_client.ts @@ -23,6 +23,7 @@ import { CallOptions, Descriptors, ClientOptions, + GrpcClientOptions, LROperation, } from 'google-gax'; @@ -67,7 +68,7 @@ export class VideoIntelligenceServiceClient { * * @param {object} [options] - The configuration object. * The options accepted by the constructor are described in detail - * in [this document](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#creating-the-client-instance). 
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). * The common options are: * @param {object} [options.credentials] - Credentials object. * @param {string} [options.credentials.client_email] @@ -90,11 +91,10 @@ export class VideoIntelligenceServiceClient { * API remote host. * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. * Follows the structure of {@link gapicConfig}. - * @param {boolean} [options.fallback] - Use HTTP fallback mode. - * In fallback mode, a special browser-compatible transport implementation is used - * instead of gRPC transport. In browser context (if the `window` object is defined) - * the fallback mode is enabled automatically; set `options.fallback` to `false` - * if you need to override this behavior. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. */ constructor(opts?: ClientOptions) { // Ensure that options include all the required fields. @@ -159,16 +159,53 @@ export class VideoIntelligenceServiceClient { this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); const protoFilesRoot = this._gaxModule.protobuf.Root.fromJSON(jsonProtos); - // This API contains "long-running operations", which return a // an Operation object that allows for tracking of the operation, // rather than holding a request open. - + const lroOptions: GrpcClientOptions = { + auth: this.auth, + grpc: 'grpc' in this._gaxGrpc ? 
this._gaxGrpc.grpc : undefined, + }; + if (opts.fallback === 'rest') { + lroOptions.protoJson = protoFilesRoot; + lroOptions.httpRules = [ + { + selector: 'google.longrunning.Operations.ListOperations', + get: '/v1p3beta1/{name=projects/*/locations/*}/operations', + }, + { + selector: 'google.longrunning.Operations.GetOperation', + get: '/v1p3beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + get: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.DeleteOperation', + delete: '/v1p3beta1/{name=projects/*/locations/*/operations/*}', + additional_bindings: [ + { + delete: + '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}', + }, + ], + }, + { + selector: 'google.longrunning.Operations.CancelOperation', + post: '/v1p3beta1/{name=projects/*/locations/*/operations/*}:cancel', + body: '*', + additional_bindings: [ + { + post: '/v1p3beta1/operations/{name=projects/*/locations/*/operations/*}:cancel', + }, + ], + }, + ]; + } this.operationsClient = this._gaxModule - .lro({ - auth: this.auth, - grpc: 'grpc' in this._gaxGrpc ? this._gaxGrpc.grpc : undefined, - }) + .lro(lroOptions) .operationsClient(opts); const annotateVideoResponse = protoFilesRoot.lookup( '.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse'