
feat!: proper long running operation returned from Agent v2beta1 calls (#621)

* changes without context

        autosynth cannot find the source of changes triggered by earlier changes in this
        repository, or by version upgrades to tools such as linters.

* feat!: additional client library annotations
  feat: reload information in the Document resource
  BREAKING CHANGE: additional long-running operation annotations
  BREAKING CHANGE: some existing request fields are now explicitly required
  BREAKING CHANGE: the order of the Session resource patterns has changed
  (the new long-running-operation calling pattern is sketched after the commit message)

PiperOrigin-RevId: 312345206

Source-Author: Google APIs <noreply@google.com>
Source-Date: Tue May 19 13:46:01 2020 -0700
Source-Repo: googleapis/googleapis
Source-Sha: 7a8875d4bcf51993f6f7aa181066cc5ec95539d5
Source-Link: googleapis/googleapis@7a8875d

* feat: add one additional binding for SearchAgents API; clean up deps; update Go library.
  docs: update doc links for intent.

PiperOrigin-RevId: 313408465

Source-Author: Google APIs <noreply@google.com>
Source-Date: Wed May 27 09:53:46 2020 -0700
Source-Repo: googleapis/googleapis
Source-Sha: 576234f179d2e8dde1e629888afafb59abec7e6c
Source-Link: googleapis/googleapis@576234f

* docs: cleaned docs for the Agents service and resource.

PiperOrigin-RevId: 314879617

Source-Author: Google APIs <noreply@google.com>
Source-Date: Fri Jun 5 00:27:22 2020 -0700
Source-Repo: googleapis/googleapis
Source-Sha: cd804bab06e46dd1a4f16c32155fd3cddb931b52
Source-Link: googleapis/googleapis@cd804ba

* fix(tests): avoid Node v10 memory issue when running tests

Co-authored-by: Alexander Fenster <fenster@google.com>
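
With this change, Agents methods that kick off server-side work (such as TrainAgent, ImportAgent, RestoreAgent, and ExportAgent) return a proper long-running operation in v2beta1, so callers must await the operation's completion instead of treating the immediate result as final. A minimal sketch of the new calling pattern, assuming this library's generated v2beta1 AgentsClient surface (the project ID is a placeholder):

    import {v2beta1} from '@google-cloud/dialogflow';

    async function trainAgent(): Promise<void> {
      const client = new v2beta1.AgentsClient();
      // The call now resolves to [operation] rather than a final response.
      const [operation] = await client.trainAgent({
        parent: 'projects/my-project/agent', // hypothetical project ID
      });
      // Wait for the long-running operation to complete on the server.
      const [response] = await operation.promise();
      console.log('Agent training finished:', response);
    }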
yoshi-automation and alexander-fenster authored Jun 24, 2020
1 parent 759adde commit 7f8c820
Showing 43 changed files with 20,176 additions and 2,392 deletions.
packages/google-cloud-dialogflow/package.json (1 addition & 1 deletion)
@@ -27,7 +27,7 @@
    "Dialogflow API"
  ],
  "scripts": {
-    "test": "c8 mocha build/test",
+    "test": "c8 mocha build/test/*_v2.js && c8 mocha build/test/*_v2beta1.js",
    "samples-test": "cd samples/ && npm link ../ && npm install && npm test && cd ../",
    "system-test": "mocha build/system-test",
    "docs": "jsdoc -c .jsdoc.js",

packages/google-cloud-dialogflow/protos/google/cloud/dialogflow/v2/agent.proto
@@ -33,34 +33,7 @@ option java_outer_classname = "AgentProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

-// Agents are best described as Natural Language Understanding (NLU) modules
-// that transform user requests into actionable data. You can include agents
-// in your app, product, or service to determine user intent and respond to the
-// user in a natural way.
-//
-// After you create an agent, you can add [Intents][google.cloud.dialogflow.v2.Intents], [Contexts][google.cloud.dialogflow.v2.Contexts],
-// [Entity Types][google.cloud.dialogflow.v2.EntityTypes], [Webhooks][google.cloud.dialogflow.v2.WebhookRequest], and so on to
-// manage the flow of a conversation and match user input to predefined intents
-// and actions.
-//
-// You can create an agent using both Dialogflow Standard Edition and
-// Dialogflow Enterprise Edition. For details, see
-// [Dialogflow
-// Editions](https://cloud.google.com/dialogflow/docs/editions).
-//
-// You can save your agent for backup or versioning by exporting the agent by
-// using the [ExportAgent][google.cloud.dialogflow.v2.Agents.ExportAgent] method. You can import a saved
-// agent by using the [ImportAgent][google.cloud.dialogflow.v2.Agents.ImportAgent] method.
-//
-// Dialogflow provides several
-// [prebuilt
-// agents](https://cloud.google.com/dialogflow/docs/agents-prebuilt)
-// for common conversation scenarios such as determining a date and time,
-// converting currency, and so on.
-//
-// For more information about agents, see the
-// [Dialogflow
-// documentation](https://cloud.google.com/dialogflow/docs/agents-overview).
+// Service for managing [Agents][google.cloud.dialogflow.v2.Agent].
service Agents {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
@@ -180,7 +153,16 @@ service Agents {
}
}

-// Represents a conversational agent.
+// A Dialogflow agent is a virtual agent that handles conversations with your
+// end-users. It is a natural language understanding module that understands the
+// nuances of human language. Dialogflow translates end-user text or audio
+// during a conversation to structured data that your apps and services can
+// understand. You design and build a Dialogflow agent to handle the types of
+// conversations required for your system.
+//
+// For more information about agents, see the
+// [Agents
+// documentation](https://cloud.google.com/dialogflow/docs/agents-overview).
message Agent {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Agent"

packages/google-cloud-dialogflow/protos/google/cloud/dialogflow/v2/audio_config.proto
@@ -29,6 +29,35 @@ option java_outer_classname = "AudioConfigProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

+// Hints for the speech recognizer to help with recognition in a specific
+// conversation state.
+message SpeechContext {
+  // Optional. A list of strings containing words and phrases that the speech
+  // recognizer should recognize with higher likelihood.
+  //
+  // This list can be used to:
+  // * improve accuracy for words and phrases you expect the user to say,
+  //   e.g. typical commands for your Dialogflow agent
+  // * add additional words to the speech recognizer vocabulary
+  // * ...
+  //
+  // See the [Cloud Speech
+  // documentation](https://cloud.google.com/speech-to-text/quotas) for usage
+  // limits.
+  repeated string phrases = 1;
+
+  // Optional. Boost for this context compared to other contexts:
+  //
+  // * If the boost is positive, Dialogflow will increase the probability that
+  //   the phrases in this context are recognized over similar sounding phrases.
+  // * If the boost is unspecified or non-positive, Dialogflow will not apply
+  //   any boost.
+  //
+  // Dialogflow recommends that you use boosts in the range (0, 20] and that you
+  // find a value that fits your use case with binary search.
+  float boost = 2;
+}
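
Speech contexts are supplied per request through the input audio configuration. A hedged sketch of biasing recognition toward a few phrases in a detectIntent call, assuming the v2 SessionsClient surface and its speechContexts request field (IDs, phrases, and the audio file are placeholders):

    import {SessionsClient} from '@google-cloud/dialogflow';
    import * as fs from 'fs';

    async function detectWithSpeechContext(): Promise<void> {
      const client = new SessionsClient();
      const [response] = await client.detectIntent({
        session: client.projectAgentSessionPath('my-project', 'my-session'), // hypothetical IDs
        queryInput: {
          audioConfig: {
            audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
            sampleRateHertz: 16000,
            languageCode: 'en-US',
            // Bias the recognizer toward expected phrases; boost in (0, 20].
            speechContexts: [{phrases: ['check my balance', 'transfer funds'], boost: 10}],
          },
        },
        inputAudio: fs.readFileSync('query.wav'), // hypothetical audio file
      });
      console.log('Query text:', response.queryResult?.queryText);
    }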

// Audio encoding of the audio content sent in the conversational query request.
// Refer to the
// [Cloud Speech API
@@ -78,33 +107,29 @@ enum AudioEncoding {
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}

-// Hints for the speech recognizer to help with recognition in a specific
-// conversation state.
-message SpeechContext {
-  // Optional. A list of strings containing words and phrases that the speech
-  // recognizer should recognize with higher likelihood.
-  //
-  // This list can be used to:
-  // * improve accuracy for words and phrases you expect the user to say,
-  //   e.g. typical commands for your Dialogflow agent
-  // * add additional words to the speech recognizer vocabulary
-  // * ...
-  //
-  // See the [Cloud Speech
-  // documentation](https://cloud.google.com/speech-to-text/quotas) for usage
-  // limits.
-  repeated string phrases = 1;
+// Information for a word recognized by the speech recognizer.
+message SpeechWordInfo {
+  // The word this info is for.
+  string word = 3;

-  // Optional. Boost for this context compared to other contexts:
-  //
-  // * If the boost is positive, Dialogflow will increase the probability that
-  //   the phrases in this context are recognized over similar sounding phrases.
-  // * If the boost is unspecified or non-positive, Dialogflow will not apply
-  //   any boost.
+  // Time offset relative to the beginning of the audio that corresponds to the
+  // start of the spoken word. This is an experimental feature and the accuracy
+  // of the time offset can vary.
+  google.protobuf.Duration start_offset = 1;
+
+  // Time offset relative to the beginning of the audio that corresponds to the
+  // end of the spoken word. This is an experimental feature and the accuracy of
+  // the time offset can vary.
+  google.protobuf.Duration end_offset = 2;
+
+  // The Speech confidence between 0.0 and 1.0 for this word. A higher number
+  // indicates an estimated greater likelihood that the recognized word is
+  // correct. The default of 0.0 is a sentinel value indicating that confidence
+  // was not set.
  //
-  // Dialogflow recommends that you use boosts in the range (0, 20] and that you
-  // find a value that fits your use case with binary search.
-  float boost = 2;
+  // This field is not guaranteed to be fully stable over time for the same
+  // audio input. Users should also not rely on it to always be provided.
+  float confidence = 4;
}
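
Word-level results surface only when word info is enabled on the InputAudioConfig (the enable_word_info flag), and each word carries protobuf Duration offsets plus a confidence where 0.0 is a sentinel for "not set". A rough sketch of reading them off a v2 StreamingRecognitionResult, assuming this package's protos type exports (the result value is a placeholder):

    import type {protos} from '@google-cloud/dialogflow';

    type IStreamingRecognitionResult =
      protos.google.cloud.dialogflow.v2.IStreamingRecognitionResult;

    // Convert a protobuf Duration (seconds + nanos) into fractional seconds.
    function toSeconds(d?: protos.google.protobuf.IDuration | null): number {
      return Number(d?.seconds ?? 0) + (d?.nanos ?? 0) / 1e9;
    }

    function logWords(result: IStreamingRecognitionResult): void {
      for (const w of result.speechWordInfo ?? []) {
        // Offsets are experimental; a confidence of 0.0 means "not set".
        console.log(
          `${w.word}: ${toSeconds(w.startOffset).toFixed(2)}s to ` +
          `${toSeconds(w.endOffset).toFixed(2)}s, confidence=${w.confidence ?? 0}`
        );
      }
    }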

// Variant of the specified [Speech model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
@@ -150,31 +175,6 @@ enum SpeechModelVariant {
USE_ENHANCED = 3;
}

-// Information for a word recognized by the speech recognizer.
-message SpeechWordInfo {
-  // The word this info is for.
-  string word = 3;
-
-  // Time offset relative to the beginning of the audio that corresponds to the
-  // start of the spoken word. This is an experimental feature and the accuracy
-  // of the time offset can vary.
-  google.protobuf.Duration start_offset = 1;
-
-  // Time offset relative to the beginning of the audio that corresponds to the
-  // end of the spoken word. This is an experimental feature and the accuracy of
-  // the time offset can vary.
-  google.protobuf.Duration end_offset = 2;
-
-  // The Speech confidence between 0.0 and 1.0 for this word. A higher number
-  // indicates an estimated greater likelihood that the recognized word is
-  // correct. The default of 0.0 is a sentinel value indicating that confidence
-  // was not set.
-  //
-  // This field is not guaranteed to be fully stable over time for the same
-  // audio input. Users should also not rely on it to always be provided.
-  float confidence = 4;
-}
-
// Instructs the speech recognizer how to process the audio content.
message InputAudioConfig {
// Required. Audio encoding of the audio content to process.
@@ -248,6 +248,21 @@ message InputAudioConfig {
bool single_utterance = 8;
}

+// Description of which voice to use for speech synthesis.
+message VoiceSelectionParams {
+  // Optional. The name of the voice. If not set, the service will choose a
+  // voice based on the other parameters such as language_code and
+  // [ssml_gender][google.cloud.dialogflow.v2.VoiceSelectionParams.ssml_gender].
+  string name = 1;
+
+  // Optional. The preferred gender of the voice. If not set, the service will
+  // choose a voice based on the other parameters such as language_code and
+  // [name][google.cloud.dialogflow.v2.VoiceSelectionParams.name]. Note that this is only a preference, not a requirement. If a
+  // voice of the appropriate gender is not available, the synthesizer should
+  // substitute a voice with a different gender rather than failing the request.
+  SsmlVoiceGender ssml_gender = 2;
+}
+
// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
enum SsmlVoiceGender {
@@ -265,21 +280,6 @@ enum SsmlVoiceGender {
SSML_VOICE_GENDER_NEUTRAL = 3;
}

-// Description of which voice to use for speech synthesis.
-message VoiceSelectionParams {
-  // Optional. The name of the voice. If not set, the service will choose a
-  // voice based on the other parameters such as language_code and
-  // [ssml_gender][google.cloud.dialogflow.v2.VoiceSelectionParams.ssml_gender].
-  string name = 1;
-
-  // Optional. The preferred gender of the voice. If not set, the service will
-  // choose a voice based on the other parameters such as language_code and
-  // [name][google.cloud.dialogflow.v2.VoiceSelectionParams.name]. Note that this is only a preference, not a requirement. If a
-  // voice of the appropriate gender is not available, the synthesizer should
-  // substitute a voice with a different gender rather than failing the request.
-  SsmlVoiceGender ssml_gender = 2;
-}
-
// Configuration of how speech should be synthesized.
message SynthesizeSpeechConfig {
// Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
@@ -312,6 +312,24 @@ message SynthesizeSpeechConfig {
VoiceSelectionParams voice = 4;
}

+// Instructs the speech synthesizer on how to generate the output audio content.
+// If this audio config is supplied in a request, it overrides all existing
+// text-to-speech settings applied to the agent.
+message OutputAudioConfig {
+  // Required. Audio encoding of the synthesized audio content.
+  OutputAudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // The synthesis sample rate (in hertz) for this audio. If not
+  // provided, then the synthesizer will use the default sample rate based on
+  // the audio encoding. If this is different from the voice's natural sample
+  // rate, then the synthesizer will honor this request by converting to the
+  // desired sample rate (which might result in worse audio quality).
+  int32 sample_rate_hertz = 2;
+
+  // Configuration of how speech should be synthesized.
+  SynthesizeSpeechConfig synthesize_speech_config = 3;
+}
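
Output audio settings ride along on the detect-intent request and, per the comment above, override any agent-level text-to-speech settings. A hedged sketch of requesting synthesized speech for the reply, again assuming the v2 SessionsClient surface (IDs and the output path are placeholders):

    import {SessionsClient} from '@google-cloud/dialogflow';
    import * as fs from 'fs';

    async function detectWithAudioOutput(): Promise<void> {
      const client = new SessionsClient();
      const [response] = await client.detectIntent({
        session: client.projectAgentSessionPath('my-project', 'my-session'), // hypothetical IDs
        queryInput: {text: {text: 'What are your hours?', languageCode: 'en-US'}},
        outputAudioConfig: {
          audioEncoding: 'OUTPUT_AUDIO_ENCODING_LINEAR_16',
          sampleRateHertz: 16000, // converted from the voice's natural rate if they differ
          synthesizeSpeechConfig: {voice: {ssmlGender: 'SSML_VOICE_GENDER_FEMALE'}},
        },
      });
      // Synthesized reply bytes; raw PCM here, so add a WAV header if your player needs one.
      fs.writeFileSync('reply.raw', response.outputAudio!); // hypothetical output path
    }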

// Audio encoding of the output audio format in Text-To-Speech.
enum OutputAudioEncoding {
// Not specified.
@@ -330,21 +348,3 @@ enum OutputAudioEncoding {
// than MP3 while using approximately the same bitrate.
OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3;
}

-// Instructs the speech synthesizer on how to generate the output audio content.
-// If this audio config is supplied in a request, it overrides all existing
-// text-to-speech settings applied to the agent.
-message OutputAudioConfig {
-  // Required. Audio encoding of the synthesized audio content.
-  OutputAudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED];
-
-  // The synthesis sample rate (in hertz) for this audio. If not
-  // provided, then the synthesizer will use the default sample rate based on
-  // the audio encoding. If this is different from the voice's natural sample
-  // rate, then the synthesizer will honor this request by converting to the
-  // desired sample rate (which might result in worse audio quality).
-  int32 sample_rate_hertz = 2;
-
-  // Configuration of how speech should be synthesized.
-  SynthesizeSpeechConfig synthesize_speech_config = 3;
-}