From 517d33e81512acdbb4e58c1bc4db6f332e968f88 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 12 Feb 2020 11:00:00 -0800 Subject: [PATCH 1/4] fix: adds spaces to region tags, other fixes --- samples/analyze.v1p3beta1.js | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/samples/analyze.v1p3beta1.js b/samples/analyze.v1p3beta1.js index 456f2f3f..3fcb78ea 100644 --- a/samples/analyze.v1p3beta1.js +++ b/samples/analyze.v1p3beta1.js @@ -15,7 +15,7 @@ 'use strict'; async function detectPerson(path) { - //[START video_detect_person_beta] + // [START video_detect_person_beta] // Imports the Google Cloud Video Intelligence library + Node's fs library const Video = require('@google-cloud/video-intelligence').v1p3beta1; const fs = require('fs'); @@ -44,6 +44,7 @@ async function detectPerson(path) { }, }; // Detects people in a video + // We get the first result because only one video is processed. const [operation] = await video.annotateVideo(request); const results = await operation.promise(); console.log('Waiting for operation to complete...'); @@ -96,7 +97,7 @@ async function detectPerson(path) { // [END video_detect_person_beta] } async function detectPersonGCS(gcsUri) { - //[START video_detect_person_gcs_beta] + // [START video_detect_person_gcs_beta] // Imports the Google Cloud Video Intelligence library const Video = require('@google-cloud/video-intelligence').v1p3beta1; // Creates a client @@ -173,7 +174,7 @@ async function detectPersonGCS(gcsUri) { // [END video_detect_person_gcs_beta] } async function detectFaces(path) { - //[START video_detect_faces_beta] + // [START video_detect_faces_beta] // Imports the Google Cloud Video Intelligence library + Node's fs library const Video = require('@google-cloud/video-intelligence').v1p3beta1; const fs = require('fs'); @@ -244,10 +245,10 @@ async function detectFaces(path) { } } } - //[END video_detect_faces_beta] + // [END video_detect_faces_beta] } async function detectFacesGCS(gcsUri) { - //[START video_detect_faces_gcs_beta] + // [START video_detect_faces_gcs_beta] // Imports the Google Cloud Video Intelligence library const Video = require('@google-cloud/video-intelligence').v1p3beta1; // Creates a client @@ -314,7 +315,7 @@ async function detectFacesGCS(gcsUri) { } } } - //[END video_detect_faces_gcs_beta] + // [END video_detect_faces_gcs_beta] } async function main() { From c614dfd393695dd331f0df1477318ad2a72e47ec Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 12 Feb 2020 13:07:07 -0800 Subject: [PATCH 2/4] fix: refactors people and face detection into separate files --- samples/analyze_face_detection.js | 98 ++++++++++++++++ samples/analyze_face_detection_gcs.js | 95 +++++++++++++++ samples/analyze_person_detection.js | 111 ++++++++++++++++++ samples/analyze_person_detection_gcs.js | 102 ++++++++++++++++ .../analyze_face_detection.test.js | 31 +++++ .../analyze_face_detection_gcs.test.js | 31 +++++ .../analyze_person_detection.test.js | 31 +++++ .../analyze_person_detection_gcs.test.js | 31 +++++ 8 files changed, 530 insertions(+) create mode 100644 samples/analyze_face_detection.js create mode 100644 samples/analyze_face_detection_gcs.js create mode 100644 samples/analyze_person_detection.js create mode 100644 samples/analyze_person_detection_gcs.js create mode 100644 samples/system-test/analyze_face_detection.test.js create mode 100644 samples/system-test/analyze_face_detection_gcs.test.js create mode 100644 samples/system-test/analyze_person_detection.test.js create mode 100644 
samples/system-test/analyze_person_detection_gcs.test.js diff --git a/samples/analyze_face_detection.js b/samples/analyze_face_detection.js new file mode 100644 index 00000000..7049f213 --- /dev/null +++ b/samples/analyze_face_detection.js @@ -0,0 +1,98 @@ +/** + * Copyright 2020, Google, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +function main(path = 'YOUR_LOCAL_FILE') { + // [START video_detect_faces_beta] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const path = 'Local file to analyze, e.g. ./my-file.mp4'; + + // Imports the Google Cloud Video Intelligence library + Node's fs library + const Video = require('@google-cloud/video-intelligence').v1p3beta1; + const fs = require('fs'); + + // Creates a client + const video = new Video.VideoIntelligenceServiceClient(); + + // Reads a local video file and converts it to base64 + const file = fs.readFileSync(path); + const inputContent = file.toString('base64'); + + async function detectFaces() { + const request = { + inputContent: inputContent, + features: ['FACE_DETECTION'], + videoContext: { + faceDetectionConfig: { + // Must set includeBoundingBoxes to true to get facial attributes. + includeBoundingBoxes: true, + includeAttributes: true, + }, + }, + }; + // Detects faces in a video + // We get the first result because we only process 1 video + const [operation] = await video.annotateVideo(request); + const results = await operation.promise(); + console.log('Waiting for operation to complete...'); + + // Gets annotations for video + const faceAnnotations = + results[0].annotationResults[0].faceDetectionAnnotations; + for (const {tracks} of faceAnnotations) { + console.log('Face detected:'); + for (const {segment, timestampedObjects} of tracks) { + if (segment.startTimeOffset.seconds === undefined) { + segment.startTimeOffset.seconds = 0; + } + if (segment.startTimeOffset.nanos === undefined) { + segment.startTimeOffset.nanos = 0; + } + if (segment.endTimeOffset.seconds === undefined) { + segment.endTimeOffset.seconds = 0; + } + if (segment.endTimeOffset.nanos === undefined) { + segment.endTimeOffset.nanos = 0; + } + console.log( + `\tStart: ${segment.startTimeOffset.seconds}` + + `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` + ); + console.log( + `\tEnd: ${segment.endTimeOffset.seconds}.` + + `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` + ); + + // Each segment includes timestamped objects that + // include characteristics of the face detected. + const [firstTimestapedObject] = timestampedObjects; + + for (const {name} of firstTimestapedObject.attributes) { + // Attributes include unique pieces of clothing, like glasses, + // poses, or hair color. 
+ console.log(`\tAttribute: ${name}; `); + } + } + } + } + + detectFaces(); + // [END video_detect_faces_beta] +} + +main(...process.argv.slice(2)); diff --git a/samples/analyze_face_detection_gcs.js b/samples/analyze_face_detection_gcs.js new file mode 100644 index 00000000..86006f08 --- /dev/null +++ b/samples/analyze_face_detection_gcs.js @@ -0,0 +1,95 @@ +/** + * Copyright 2020, Google, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +function main(gcsUri = 'YOUR_STORAGE_URI') { + // [START video_detect_faces_gcs_beta] + /** + * TODO(developer): Uncomment these variables before running the sample. + */ + // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4'; + + // Imports the Google Cloud Video Intelligence library + Node's fs library + const Video = require('@google-cloud/video-intelligence').v1p3beta1; + + // Creates a client + const video = new Video.VideoIntelligenceServiceClient(); + + async function detectFacesGCS() { + const request = { + inputUri: gcsUri, + features: ['FACE_DETECTION'], + videoContext: { + faceDetectionConfig: { + // Must set includeBoundingBoxes to true to get facial attributes. + includeBoundingBoxes: true, + includeAttributes: true, + }, + }, + }; + // Detects faces in a video + // We get the first result because we only process 1 video + const [operation] = await video.annotateVideo(request); + const results = await operation.promise(); + console.log('Waiting for operation to complete...'); + + // Gets annotations for video + const faceAnnotations = + results[0].annotationResults[0].faceDetectionAnnotations; + + for (const {tracks} of faceAnnotations) { + console.log('Face detected:'); + + for (const {segment, timestampedObjects} of tracks) { + if (segment.startTimeOffset.seconds === undefined) { + segment.startTimeOffset.seconds = 0; + } + if (segment.startTimeOffset.nanos === undefined) { + segment.startTimeOffset.nanos = 0; + } + if (segment.endTimeOffset.seconds === undefined) { + segment.endTimeOffset.seconds = 0; + } + if (segment.endTimeOffset.nanos === undefined) { + segment.endTimeOffset.nanos = 0; + } + console.log( + `\tStart: ${segment.startTimeOffset.seconds}.` + + `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` + ); + console.log( + `\tEnd: ${segment.endTimeOffset.seconds}.` + + `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` + ); + + // Each segment includes timestamped objects that + // include characteristics of the face detected. + const [firstTimestapedObject] = timestampedObjects; + + for (const {name} of firstTimestapedObject.attributes) { + // Attributes include unique pieces of clothing, like glasses, + // poses, or hair color. 
+          console.log(`\tAttribute: ${name}; `);
+        }
+      }
+    }
+  }
+
+  detectFacesGCS();
+  // [END video_detect_faces_gcs_beta]
+}
+
+main(...process.argv.slice(2));
diff --git a/samples/analyze_person_detection.js b/samples/analyze_person_detection.js
new file mode 100644
index 00000000..1eddacb6
--- /dev/null
+++ b/samples/analyze_person_detection.js
@@ -0,0 +1,111 @@
+/**
+ * Copyright 2020, Google, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+function main(path = 'YOUR_LOCAL_FILE') {
+  // [START video_detect_person_beta]
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  // const path = 'Local file to analyze, e.g. ./my-file.mp4';
+
+  // Imports the Google Cloud Video Intelligence library + Node's fs library
+  const Video = require('@google-cloud/video-intelligence').v1p3beta1;
+  const fs = require('fs');
+  // Creates a client
+  const video = new Video.VideoIntelligenceServiceClient();
+
+  /**
+   * TODO(developer): Uncomment the following line before running the sample.
+   */
+  // const path = 'Local file to analyze, e.g. ./my-file.mp4';
+
+  // Reads a local video file and converts it to base64
+  const file = fs.readFileSync(path);
+  const inputContent = file.toString('base64');
+
+  async function detectPerson() {
+    const request = {
+      inputContent: inputContent,
+      features: ['PERSON_DETECTION'],
+      videoContext: {
+        personDetectionConfig: {
+          // Must set includeBoundingBoxes to true to get poses and attributes.
+          includeBoundingBoxes: true,
+          includePoseLandmarks: true,
+          includeAttributes: true,
+        },
+      },
+    };
+    // Detects people in a video
+    // We get the first result because we only process 1 video
+    const [operation] = await video.annotateVideo(request);
+    const results = await operation.promise();
+    console.log('Waiting for operation to complete...');
+
+    // Gets annotations for video
+    const personAnnotations =
+      results[0].annotationResults[0].personDetectionAnnotations;
+
+    for (const {tracks} of personAnnotations) {
+      console.log('Person detected:');
+
+      for (const {segment, timestampedObjects} of tracks) {
+        if (segment.startTimeOffset.seconds === undefined) {
+          segment.startTimeOffset.seconds = 0;
+        }
+        if (segment.startTimeOffset.nanos === undefined) {
+          segment.startTimeOffset.nanos = 0;
+        }
+        if (segment.endTimeOffset.seconds === undefined) {
+          segment.endTimeOffset.seconds = 0;
+        }
+        if (segment.endTimeOffset.nanos === undefined) {
+          segment.endTimeOffset.nanos = 0;
+        }
+        console.log(
+          `\tStart: ${segment.startTimeOffset.seconds}` +
+            `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
+        );
+        console.log(
+          `\tEnd: ${segment.endTimeOffset.seconds}.` +
+            `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
+        );
+
+        // Each segment includes timestamped objects that
+        // include characteristics of the person detected,
+        // e.g. clothes and posture.
+        const [firstTimestampedObject] = timestampedObjects;
+
+        // Attributes include unique pieces of clothing,
+        // poses, or hair color.
+        for (const {name, value} of firstTimestampedObject.attributes) {
+          console.log(`\tAttribute: ${name}; Value: ${value}`);
+        }
+
+        // Landmarks in person detection include body parts.
+        for (const {name, point} of firstTimestampedObject.landmarks) {
+          console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`);
+        }
+      }
+    }
+  }
+
+  detectPerson();
+  // [END video_detect_person_beta]
+}
+
+main(...process.argv.slice(2));
diff --git a/samples/analyze_person_detection_gcs.js b/samples/analyze_person_detection_gcs.js
new file mode 100644
index 00000000..7322464c
--- /dev/null
+++ b/samples/analyze_person_detection_gcs.js
@@ -0,0 +1,102 @@
+/**
+ * Copyright 2020, Google, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+function main(gcsUri = 'YOUR_STORAGE_URI') {
+  // [START video_detect_person_gcs_beta]
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';
+
+  // Imports the Google Cloud Video Intelligence library
+  const Video = require('@google-cloud/video-intelligence').v1p3beta1;
+
+  // Creates a client
+  const video = new Video.VideoIntelligenceServiceClient();
+
+  async function detectPersonGCS() {
+    const request = {
+      inputUri: gcsUri,
+      features: ['PERSON_DETECTION'],
+      videoContext: {
+        personDetectionConfig: {
+          // Must set includeBoundingBoxes to true to get poses and attributes.
+          includeBoundingBoxes: true,
+          includePoseLandmarks: true,
+          includeAttributes: true,
+        },
+      },
+    };
+    // Detects people in a video
+    // We get the first result because we only process 1 video
+    const [operation] = await video.annotateVideo(request);
+    const results = await operation.promise();
+    console.log('Waiting for operation to complete...');
+
+    // Gets annotations for video
+    const personAnnotations =
+      results[0].annotationResults[0].personDetectionAnnotations;
+
+    for (const {tracks} of personAnnotations) {
+      console.log('Person detected:');
+
+      for (const {segment, timestampedObjects} of tracks) {
+        if (segment.startTimeOffset.seconds === undefined) {
+          segment.startTimeOffset.seconds = 0;
+        }
+        if (segment.startTimeOffset.nanos === undefined) {
+          segment.startTimeOffset.nanos = 0;
+        }
+        if (segment.endTimeOffset.seconds === undefined) {
+          segment.endTimeOffset.seconds = 0;
+        }
+        if (segment.endTimeOffset.nanos === undefined) {
+          segment.endTimeOffset.nanos = 0;
+        }
+        console.log(
+          `\tStart: ${segment.startTimeOffset.seconds}` +
+            `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
+        );
+        console.log(
+          `\tEnd: ${segment.endTimeOffset.seconds}.` +
+            `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
+        );
+
+        // Each segment includes timestamped objects that
+        // include characteristics of the person detected,
+        // e.g. clothes and posture.
+        const [firstTimestampedObject] = timestampedObjects;
+
+        // Attributes include unique pieces of clothing,
+        // poses, or hair color.
+ for (const {name, value} of firstTimestampedObject.attributes) { + console.log(`\tAttribute: ${name}; Value: ${value}`); + } + + // Landmarks in person detection include body parts. + for (const {name, point} of firstTimestampedObject.landmarks) { + console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`); + } + } + } + } + + detectPersonGCS(); + // [END video_detect_person_gcs_beta] +} + +main(...process.argv.slice(2)); diff --git a/samples/system-test/analyze_face_detection.test.js b/samples/system-test/analyze_face_detection.test.js new file mode 100644 index 00000000..2172af94 --- /dev/null +++ b/samples/system-test/analyze_face_detection.test.js @@ -0,0 +1,31 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const cp = require('child_process'); +const {assert} = require('chai'); +const {describe, it} = require('mocha'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +const cmd = `node analyze_face_detection.js`; +const file = 'resources/googlework_short.mp4'; + +describe('analyzing faces in video', () => { + it('should identify faces in a local file', async () => { + const output = execSync(`${cmd} ${file}`); + assert.match(output, /glasses/); + }); +}); diff --git a/samples/system-test/analyze_face_detection_gcs.test.js b/samples/system-test/analyze_face_detection_gcs.test.js new file mode 100644 index 00000000..4cd9ae6a --- /dev/null +++ b/samples/system-test/analyze_face_detection_gcs.test.js @@ -0,0 +1,31 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +'use strict'; + +const cp = require('child_process'); +const {assert} = require('chai'); +const {describe, it} = require('mocha'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +const cmd = `node analyze_face_detection_gcs.js`; +const gcsUri = 'gs://cloud-samples-data/video/googlework_short.mp4'; + +describe('analyzing faces in video', () => { + it('should identify faces in a file in Google Storage', async () => { + const output = execSync(`${cmd} ${gcsUri}`); + assert.match(output, /glasses/); + }); +}); diff --git a/samples/system-test/analyze_person_detection.test.js b/samples/system-test/analyze_person_detection.test.js new file mode 100644 index 00000000..b802c3e1 --- /dev/null +++ b/samples/system-test/analyze_person_detection.test.js @@ -0,0 +1,31 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const cp = require('child_process'); +const {assert} = require('chai'); +const {describe, it} = require('mocha'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +const cmd = `node analyze_person_detection.js`; +const file = 'resources/googlework_short.mp4'; + +describe('analyzing people in video', () => { + it('should identify people in a local file', async () => { + const output = execSync(`${cmd} ${file}`); + assert.match(output, /Hair/); + }); +}); diff --git a/samples/system-test/analyze_person_detection_gcs.test.js b/samples/system-test/analyze_person_detection_gcs.test.js new file mode 100644 index 00000000..1f359bf9 --- /dev/null +++ b/samples/system-test/analyze_person_detection_gcs.test.js @@ -0,0 +1,31 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +'use strict'; + +const cp = require('child_process'); +const {assert} = require('chai'); +const {describe, it} = require('mocha'); + +const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); + +const cmd = `node analyze_person_detection_gcs.js`; +const gcsUri = 'gs://cloud-samples-data/video/googlework_short.mp4'; + +describe('analyzing people in video', () => { + it('should identify people in a file in Google Storage', async () => { + const output = execSync(`${cmd} ${gcsUri}`); + assert.match(output, /Hair/); + }); +}); From dc461896b863ca70062b4eecf29d9ad3c63e9756 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 12 Feb 2020 13:16:17 -0800 Subject: [PATCH 3/4] fix: removes unneeded files --- samples/analyze.v1p3beta1.js | 365 ------------------ samples/system-test/analyze.v1p3beta1.test.js | 49 --- 2 files changed, 414 deletions(-) delete mode 100644 samples/analyze.v1p3beta1.js delete mode 100644 samples/system-test/analyze.v1p3beta1.test.js diff --git a/samples/analyze.v1p3beta1.js b/samples/analyze.v1p3beta1.js deleted file mode 100644 index 3fcb78ea..00000000 --- a/samples/analyze.v1p3beta1.js +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -'use strict'; - -async function detectPerson(path) { - // [START video_detect_person_beta] - // Imports the Google Cloud Video Intelligence library + Node's fs library - const Video = require('@google-cloud/video-intelligence').v1p3beta1; - const fs = require('fs'); - // Creates a client - const video = new Video.VideoIntelligenceServiceClient(); - - /** - * TODO(developer): Uncomment the following line before running the sample. - */ - // const path = 'Local file to analyze, e.g. ./my-file.mp4'; - - // Reads a local video file and converts it to base64 - const file = fs.readFileSync(path); - const inputContent = file.toString('base64'); - - const request = { - inputContent: inputContent, - features: ['PERSON_DETECTION'], - videoContext: { - personDetectionConfig: { - // Must set includeBoundingBoxes to true to get poses and attributes. - includeBoundingBoxes: true, - includePoseLandmarks: true, - includeAttributes: true, - }, - }, - }; - // Detects people in a video - // We get the first result because only one video is processed. 
- const [operation] = await video.annotateVideo(request); - const results = await operation.promise(); - console.log('Waiting for operation to complete...'); - - // Gets annotations for video - const personAnnotations = - results[0].annotationResults[0].personDetectionAnnotations; - - for (const {tracks} of personAnnotations) { - console.log('Person detected:'); - for (const {segment, timestampedObjects} of tracks) { - if (segment.startTimeOffset.seconds === undefined) { - segment.startTimeOffset.seconds = 0; - } - if (segment.startTimeOffset.nanos === undefined) { - segment.startTimeOffset.nanos = 0; - } - if (segment.endTimeOffset.seconds === undefined) { - segment.endTimeOffset.seconds = 0; - } - if (segment.endTimeOffset.nanos === undefined) { - segment.endTimeOffset.nanos = 0; - } - console.log( - `\tStart: ${segment.startTimeOffset.seconds}.` + - `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - console.log( - `\tEnd: ${segment.endTimeOffset.seconds}.` + - `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - - // Each segment includes timestamped objects that - // include characteristic--e.g. clothes, posture - // of the person detected. - const [firstTimestampedObject] = timestampedObjects; - - // Attributes include unique pieces of clothing, - // poses, or hair color. - for (const {name, value} of firstTimestampedObject.attributes) { - console.log(`\tAttribute: ${name}; ` + `Value: ${value}`); - } - - // Landmarks in person detection include body parts. - for (const {name, point} of firstTimestampedObject.landmarks) { - console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`); - } - } - } - // [END video_detect_person_beta] -} -async function detectPersonGCS(gcsUri) { - // [START video_detect_person_gcs_beta] - // Imports the Google Cloud Video Intelligence library - const Video = require('@google-cloud/video-intelligence').v1p3beta1; - // Creates a client - const video = new Video.VideoIntelligenceServiceClient(); - - /** - * TODO(developer): Uncomment the following line before running the sample. - */ - // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4'; - - const request = { - inputUri: gcsUri, - features: ['PERSON_DETECTION'], - videoContext: { - personDetectionConfig: { - // Must set includeBoundingBoxes to true to get poses and attributes. 
- includeBoundingBoxes: true, - includePoseLandmarks: true, - includeAttributes: true, - }, - }, - }; - // Detects people in a video - const [operation] = await video.annotateVideo(request); - const results = await operation.promise(); - console.log('Waiting for operation to complete...'); - - // Gets annotations for video - const personAnnotations = - results[0].annotationResults[0].personDetectionAnnotations; - - for (const {tracks} of personAnnotations) { - console.log('Person detected:'); - - for (const {segment, timestampedObjects} of tracks) { - if (segment.startTimeOffset.seconds === undefined) { - segment.startTimeOffset.seconds = 0; - } - if (segment.startTimeOffset.nanos === undefined) { - segment.startTimeOffset.nanos = 0; - } - if (segment.endTimeOffset.seconds === undefined) { - segment.endTimeOffset.seconds = 0; - } - if (segment.endTimeOffset.nanos === undefined) { - segment.endTimeOffset.nanos = 0; - } - console.log( - `\tStart: ${segment.startTimeOffset.seconds}` + - `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - console.log( - `\tEnd: ${segment.endTimeOffset.seconds}.` + - `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - - // Each segment includes timestamped objects that - // include characteristic--e.g. clothes, posture - // of the person detected. - const [firstTimestampedObject] = timestampedObjects; - - // Attributes include unique pieces of clothing, - // poses, or hair color. - for (const {name, value} of firstTimestampedObject.attributes) { - console.log(`\tAttribute: ${name}; ` + `Value: ${value}`); - } - - // Landmarks in person detection include body parts. - for (const {name, point} of firstTimestampedObject.landmarks) { - console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`); - } - } - } - // [END video_detect_person_gcs_beta] -} -async function detectFaces(path) { - // [START video_detect_faces_beta] - // Imports the Google Cloud Video Intelligence library + Node's fs library - const Video = require('@google-cloud/video-intelligence').v1p3beta1; - const fs = require('fs'); - // Creates a client - const video = new Video.VideoIntelligenceServiceClient(); - - /** - * TODO(developer): Uncomment the following line before running the sample. - */ - // const path = 'Local file to analyze, e.g. ./my-file.mp4'; - - // Reads a local video file and converts it to base64 - const file = fs.readFileSync(path); - const inputContent = file.toString('base64'); - - const request = { - inputContent: inputContent, - features: ['FACE_DETECTION'], - videoContext: { - faceDetectionConfig: { - // Must set includeBoundingBoxes to true to get facial attributes. 
- includeBoundingBoxes: true, - includeAttributes: true, - }, - }, - }; - // Detects faces in a video - const [operation] = await video.annotateVideo(request); - const results = await operation.promise(); - console.log('Waiting for operation to complete...'); - - // Gets annotations for video - const faceAnnotations = - results[0].annotationResults[0].faceDetectionAnnotations; - - for (const {tracks} of faceAnnotations) { - console.log('Face detected:'); - for (const {segment, timestampedObjects} of tracks) { - if (segment.startTimeOffset.seconds === undefined) { - segment.startTimeOffset.seconds = 0; - } - if (segment.startTimeOffset.nanos === undefined) { - segment.startTimeOffset.nanos = 0; - } - if (segment.endTimeOffset.seconds === undefined) { - segment.endTimeOffset.seconds = 0; - } - if (segment.endTimeOffset.nanos === undefined) { - segment.endTimeOffset.nanos = 0; - } - console.log( - `\tStart: ${segment.startTimeOffset.seconds}` + - `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - console.log( - `\tEnd: ${segment.endTimeOffset.seconds}.` + - `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - - // Each segment includes timestamped objects that - // include characteristics of the face detected. - const [firstTimestapedObject] = timestampedObjects; - - for (const {name} of firstTimestapedObject.attributes) { - // Attributes include unique pieces of clothing, like glasses, - // poses, or hair color. - console.log(`\tAttribute: ${name}; `); - } - } - } - // [END video_detect_faces_beta] -} -async function detectFacesGCS(gcsUri) { - // [START video_detect_faces_gcs_beta] - // Imports the Google Cloud Video Intelligence library - const Video = require('@google-cloud/video-intelligence').v1p3beta1; - // Creates a client - const video = new Video.VideoIntelligenceServiceClient(); - - /** - * TODO(developer): Uncomment the following line before running the sample. - */ - // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4'; - - const request = { - inputUri: gcsUri, - features: ['FACE_DETECTION'], - videoContext: { - faceDetectionConfig: { - // Must set includeBoundingBoxes to true to get facial attributes. - includeBoundingBoxes: true, - includeAttributes: true, - }, - }, - }; - // Detects faces in a video - const [operation] = await video.annotateVideo(request); - const results = await operation.promise(); - console.log('Waiting for operation to complete...'); - - // Gets annotations for video - const faceAnnotations = - results[0].annotationResults[0].faceDetectionAnnotations; - - for (const {tracks} of faceAnnotations) { - console.log('Face detected:'); - - for (const {segment, timestampedObjects} of tracks) { - if (segment.startTimeOffset.seconds === undefined) { - segment.startTimeOffset.seconds = 0; - } - if (segment.startTimeOffset.nanos === undefined) { - segment.startTimeOffset.nanos = 0; - } - if (segment.endTimeOffset.seconds === undefined) { - segment.endTimeOffset.seconds = 0; - } - if (segment.endTimeOffset.nanos === undefined) { - segment.endTimeOffset.nanos = 0; - } - console.log( - `\tStart: ${segment.startTimeOffset.seconds}.` + - `${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - console.log( - `\tEnd: ${segment.endTimeOffset.seconds}.` + - `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s` - ); - - // Each segment includes timestamped objects that - // include characteristics of the face detected. 
- const [firstTimestapedObject] = timestampedObjects; - - for (const {name} of firstTimestapedObject.attributes) { - // Attributes include unique pieces of clothing, like glasses, - // poses, or hair color. - console.log(`\tAttribute: ${name}; `); - } - } - } - // [END video_detect_faces_gcs_beta] -} - -async function main() { - require(`yargs`) - .demand(1) - .command( - `video-person-gcs `, - `Detects people in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.`, - {}, - opts => detectPersonGCS(opts.gcsUri) - ) - .command( - `video-person `, - `Detects people in a video stored in a local file using the Cloud Video Intelligence API.`, - {}, - opts => detectPerson(opts.path) - ) - .command( - `video-faces-gcs `, - `Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.`, - {}, - opts => detectFacesGCS(opts.gcsUri) - ) - .command( - `video-faces `, - `Detects faces in a video stored in a local file using the Cloud Video Intelligence API.`, - {}, - opts => detectFaces(opts.path) - ) - .example(`node $0 video-person ./resources/googlework_short.mp4`) - .example( - `node $0 video-person-gcs gs://cloud-samples-data/video/googlework_short.mp4` - ) - .example(`node $0 video-faces ./resources/googlework_short.mp4`) - .example( - `node $0 video-faces-gcs gs://cloud-samples-data/video/googlework_short.mp4` - ) - .wrap(120) - .recommendCommands() - .epilogue( - `For more information, see https://cloud.google.com/video-intelligence/docs` - ) - .help() - .strict().argv; -} - -main().catch(console.error); diff --git a/samples/system-test/analyze.v1p3beta1.test.js b/samples/system-test/analyze.v1p3beta1.test.js deleted file mode 100644 index 2abc7568..00000000 --- a/samples/system-test/analyze.v1p3beta1.test.js +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// https://cloud.google.com/video-intelligence/docs/ - -'use strict'; - -const {assert} = require('chai'); -const {describe, it} = require('mocha'); -const cp = require('child_process'); - -const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'}); - -const cmd = 'node analyze.v1p3beta1.js'; -const url = 'gs://cloud-samples-data/video/googlework_short.mp4'; -const file = 'resources/googlework_short.mp4'; - -describe('analyze v1p3beta1 samples', () => { - it('should detect people in a local file', async () => { - const output = execSync(`${cmd} video-person ${file}`); - assert.match(output, /Hair/); - }); - - it('should detect people in a GCS file', async () => { - const output = execSync(`${cmd} video-person-gcs ${url}`); - assert.match(output, /Hair/); - }); - - it('should detect faces in a local file', async () => { - const output = execSync(`${cmd} video-faces ${file}`); - assert.match(output, /glasses/); - }); - - it('should detect faces in a GCS file', async () => { - const output = execSync(`${cmd} video-faces-gcs ${url}`); - assert.match(output, /glasses/); - }); -}); From a5bb43cfe8836f78d579e033d88a79d8afb5ad63 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 12 Feb 2020 13:25:51 -0800 Subject: [PATCH 4/4] fix: switches copyright lines in samples --- samples/analyze_face_detection.js | 27 ++++++++++++------------- samples/analyze_face_detection_gcs.js | 27 ++++++++++++------------- samples/analyze_person_detection.js | 27 ++++++++++++------------- samples/analyze_person_detection_gcs.js | 27 ++++++++++++------------- 4 files changed, 52 insertions(+), 56 deletions(-) diff --git a/samples/analyze_face_detection.js b/samples/analyze_face_detection.js index 7049f213..cf2c9c83 100644 --- a/samples/analyze_face_detection.js +++ b/samples/analyze_face_detection.js @@ -1,17 +1,16 @@ -/** - * Copyright 2020, Google, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 'use strict'; diff --git a/samples/analyze_face_detection_gcs.js b/samples/analyze_face_detection_gcs.js index 86006f08..b28f64f3 100644 --- a/samples/analyze_face_detection_gcs.js +++ b/samples/analyze_face_detection_gcs.js @@ -1,17 +1,16 @@ -/** - * Copyright 2020, Google, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 'use strict'; diff --git a/samples/analyze_person_detection.js b/samples/analyze_person_detection.js index 1eddacb6..69fb1b46 100644 --- a/samples/analyze_person_detection.js +++ b/samples/analyze_person_detection.js @@ -1,17 +1,16 @@ -/** - * Copyright 2020, Google, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 'use strict'; diff --git a/samples/analyze_person_detection_gcs.js b/samples/analyze_person_detection_gcs.js index 7322464c..d341590c 100644 --- a/samples/analyze_person_detection_gcs.js +++ b/samples/analyze_person_detection_gcs.js @@ -1,17 +1,16 @@ -/** - * Copyright 2020, Google, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 'use strict';
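All four refactored samples normalize startTimeOffset and endTimeOffset before printing, because the seconds and nanos fields can come back undefined. A minimal standalone sketch of that pattern, assuming nothing beyond plain Node.js (the helper name and the segment values are illustrative, not part of this series):

'use strict';

// Formats a {seconds, nanos} offset, where either field may be undefined,
// as "S.MMMs", the same shape the samples above print.
function formatOffset(offset) {
  const seconds = offset.seconds === undefined ? 0 : offset.seconds;
  const nanos = offset.nanos === undefined ? 0 : offset.nanos;
  return `${seconds}.${(nanos / 1e6).toFixed(0)}s`;
}

// An example segment shaped like the annotation results above.
const segment = {
  startTimeOffset: {seconds: 1, nanos: 500000000},
  endTimeOffset: {nanos: 750000000}, // seconds may be omitted by the API
};

console.log(`Start: ${formatOffset(segment.startTimeOffset)}`); // Start: 1.500s
console.log(`End: ${formatOffset(segment.endTimeOffset)}`); // End: 0.750s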