diff --git a/translate/hybridGlossaries.js b/translate/hybridGlossaries.js
new file mode 100644
index 0000000000..9017c1b887
--- /dev/null
+++ b/translate/hybridGlossaries.js
@@ -0,0 +1,232 @@
+/**
+ * Copyright 2019 Google LLC
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+async function main(
+ projectId, // Your GCP Project Id
+ inFile = 'resources/example.png',
+ outFile = 'resources/example.mp3',
+ glossaryLangs = ['fr', 'en'],
+ glossaryName = 'bistro-glossary',
+ glossaryUri = 'gs://cloud-samples-data/translation/bistro_glossary.csv'
+) {
+ // [START translate_hybrid_imports]
+ // Imports the Google Cloud client library
+ const textToSpeech = require('@google-cloud/text-to-speech');
+ const translate = require('@google-cloud/translate').v3beta1;
+ const vision = require('@google-cloud/vision');
+
+ // Import other required libraries
+ const fs = require('fs');
+ //const escape = require('escape-html');
+ const util = require('util');
+ // [END translate_hybrid_imports]
+
+ // [START translate_hybrid_vision]
+ /**
+ * Detects text in an image file
+ *
+ * ARGS
+ * inputFile: path to image file
+ * RETURNS
+ * string of text detected in the input image
+ **/
+ async function picToText(inputFile) {
+ // Creates a client
+ const client = new vision.ImageAnnotatorClient();
+
+ // Performs text detection on the local file
+ const [result] = await client.textDetection(inputFile);
+ return result.fullTextAnnotation.text;
+ }
+ // [END translate_hybrid_vision]
+
+ // [START translate_hybrid_create_glossary]
+ /** Creates a GCP glossary resource
+ * Assumes you've already manually uploaded a glossary to Cloud Storage
+ *
+ * ARGS
+ * languages: list of languages in the glossary
+ * projectId: GCP project id
+ * glossaryName: name you want to give this glossary resource
+ * glossaryUri: the uri of the glossary you uploaded to Cloud Storage
+ * RETURNS
+ * nothing
+ **/
+ async function createGlossary(
+ languages,
+ projectId,
+ glossaryName,
+ glossaryUri
+ ) {
+ // Instantiates a client
+ const translationClient = await new translate.TranslationServiceClient();
+
+ // Construct glossary
+ const glossary = {
+ languageCodesSet: {
+ languageCodes: languages,
+ },
+ inputConfig: {
+ gcsSource: {
+ inputUri: glossaryUri,
+ },
+ },
+ name: translationClient.glossaryPath(
+ projectId,
+ 'us-central1',
+ glossaryName
+ ),
+ };
+
+ // Construct request
+ const request = {
+ parent: translationClient.locationPath(projectId, 'us-central1'),
+ glossary: glossary,
+ };
+
+ // Create glossary using a long-running operation.
+ try {
+ const [operation] = await translationClient.createGlossary(request);
+ // Wait for operation to complete.
+ await operation.promise();
+ console.log(`Created glossary ` + glossaryName + '.');
+ } catch (AlreadyExists) {
+ console.log(
+ 'The glossary ' +
+ glossaryName +
+ ' already exists. No new glossary was created.'
+ );
+ }
+ }
+ // [END translate_hybrid_create_glossary]
+
+ // [START translate_hybrid_translate]
+ /**
+ * Translates text to a given language using a glossary
+ *
+ * ARGS
+ * text: String of text to translate
+ * sourceLanguageCode: language of input text
+ * targetLanguageCode: language of output text
+ * projectId: GCP project id
+ * glossaryName: name you gave your project's glossary
+ * resource when you created it
+ * RETURNS
+ * String of translated text
+ **/
+ async function translateText(
+ text,
+ sourceLanguageCode,
+ targetLanguageCode,
+ projectId,
+ glossaryName
+ ) {
+ // Instantiates a client
+ const translationClient = new translate.TranslationServiceClient();
+ const glossary = translationClient.glossaryPath(
+ projectId,
+ 'us-central1',
+ glossaryName
+ );
+ const glossaryConfig = {
+ glossary: glossary,
+ };
+ // Construct request
+ const request = {
+ parent: translationClient.locationPath(projectId, 'us-central1'),
+ contents: [text],
+ mimeType: 'text/plain', // mime types: text/plain, text/html
+ sourceLanguageCode: sourceLanguageCode,
+ targetLanguageCode: targetLanguageCode,
+ glossaryConfig: glossaryConfig,
+ };
+
+ // Run request
+ const [response] = await translationClient.translateText(request);
+ // Extract the string of translated text
+ return response.glossaryTranslations[0].translatedText;
+ }
+ // [END translate_hybrid_translate]
+
+ // [START translate_hybrid_text_to_speech]
+ /**
+ * Generates synthetic audio from plaintext tagged with SSML.
+ *
+ * Given the name of a text file and an output file name, this function
+ * tags the text in the text file with SSML. This function then
+ * calls the Text-to-Speech API. The API returns a synthetic audio
+ * version of the text, formatted according to the SSML commands. This
+ * function saves the synthetic audio to the designated output file.
+ *
+ * ARGS
+ * text: String of plaintext
+ * outFile: String name of file under which to save audio output
+ * RETURNS
+ * nothing
+ *
+ */
+ async function syntheticAudio(text, outFile) {
+ // Replace special characters with HTML Ampersand Character Codes
+ // These codes prevent the API from confusing text with SSML tags
+ // For example, '<' --> '&lt;' and '&' --> '&amp;'
+ let escapedLines = text.replace(/&/g, '&amp;');
+ escapedLines = escapedLines.replace(/"/g, '&quot;');
+ escapedLines = escapedLines.replace(/</g, '&lt;');
+ escapedLines = escapedLines.replace(/>/g, '&gt;');
+
+ // Convert plaintext to SSML
+ // Tag SSML so that there is a 2 second pause between each address
+ const expandedNewline = escapedLines.replace(/\n/g, '<break time="2s"/>');
+ const ssmlText = '<speak>' + expandedNewline + '</speak>';
+
+ // Creates a client
+ const client = new textToSpeech.TextToSpeechClient();
+
+ // Constructs the request
+ const request = {
+ // Select the text to synthesize
+ input: {ssml: ssmlText},
+ // Select the language and SSML Voice Gender (optional)
+ voice: {languageCode: 'en-US', ssmlGender: 'MALE'},
+ // Select the type of audio encoding
+ audioConfig: {audioEncoding: 'MP3'},
+ };
+
+ // Performs the Text-to-Speech request
+ const [response] = await client.synthesizeSpeech(request);
+ // Write the binary audio content to a local file
+ const writeFile = util.promisify(fs.writeFile);
+ await writeFile(outFile, response.audioContent, 'binary');
+ console.log('Audio content written to file ' + outFile);
+ }
+ // [END translate_hybrid_text_to_speech]
+
+ // [START translate_hybrid_integration]
+ await createGlossary(glossaryLangs, projectId, glossaryName, glossaryUri);
+ const text = await picToText(inFile);
+ const translatedText = await translateText(
+ text,
+ 'fr',
+ 'en',
+ projectId,
+ glossaryName
+ );
+ syntheticAudio(translatedText, outFile);
+ // [END translate_hybrid_integration]
+}
+
+main(...process.argv.slice(2));
diff --git a/translate/package.json b/translate/package.json
index a6c5cfba5c..08b07603ab 100644
--- a/translate/package.json
+++ b/translate/package.json
@@ -13,12 +13,14 @@
},
"dependencies": {
"@google-cloud/automl": "^1.0.0",
+ "@google-cloud/text-to-speech": "^1.1.4",
"@google-cloud/translate": "^4.1.3",
+ "@google-cloud/vision": "^1.2.0",
"yargs": "^14.0.0"
},
"devDependencies": {
+ "@google-cloud/storage": "^3.2.1",
"chai": "^4.2.0",
- "@google-cloud/storage": "^3.0.0",
"mocha": "^6.0.0",
"uuid": "^3.3.2"
}
diff --git a/translate/resources/example.png b/translate/resources/example.png
new file mode 100644
index 0000000000..a3ac25ab05
Binary files /dev/null and b/translate/resources/example.png differ
diff --git a/translate/test/hybridGlossaries.test.js b/translate/test/hybridGlossaries.test.js
new file mode 100644
index 0000000000..721c450752
--- /dev/null
+++ b/translate/test/hybridGlossaries.test.js
@@ -0,0 +1,73 @@
+/**
+ * Copyright 2019 Google LLC
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const fs = require('fs');
+const {assert} = require('chai');
+const {TranslationServiceClient} = require('@google-cloud/translate').v3beta1;
+const cp = require('child_process');
+
+const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
+
+const REGION_TAG = 'translate_hybrid_glossaries';
+
+describe(REGION_TAG, () => {
+ const translationClient = new TranslationServiceClient();
+ const location = 'us-central1';
+ const glossaryId = 'bistro-glossary';
+ const outputFile = 'resources/example.mp3';
+
+ before(async function() {
+ try {
+ fs.unlinkSync(outputFile);
+ } catch (e) {
+ // don't throw an exception
+ }
+ });
+
+ it('should create a glossary', async () => {
+ const projectId = await translationClient.getProjectId();
+ const output = execSync(`node hybridGlossaries.js ${projectId}`);
+ assert(output.includes(glossaryId));
+ assert(
+ output.includes('Audio content written to file resources/example.mp3')
+ );
+ assert.strictEqual(fs.existsSync(outputFile), true);
+ });
+
+ after(async function() {
+ fs.unlinkSync(outputFile);
+ assert.strictEqual(fs.existsSync(outputFile), false);
+ // get projectId
+ const projectId = await translationClient.getProjectId();
+ const name = translationClient.glossaryPath(
+ projectId,
+ location,
+ glossaryId
+ );
+ const request = {
+ parent: translationClient.locationPath(projectId, location),
+ name: name,
+ };
+
+ // Delete glossary using a long-running operation.
+ // You can wait for now, or get results later.
+ const [operation] = await translationClient.deleteGlossary(request);
+
+ // Wait for operation to complete.
+ await operation.promise();
+ });
+});