From 02d1e77d1cec8bd4655e996a136943b973508525 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:00:27 -0700 Subject: [PATCH 01/17] Integrate Python GAPIC Microgenerator in googleapis. This PR uses using documentai as an example. Depends on https://github.com/googleapis/gapic-generator-python/pull/402 PiperOrigin-RevId: 309824146 Source-Author: Google APIs Source-Date: Mon May 4 15:06:44 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: e0f9d9e1f9de890db765be46f45ca8490723e3eb Source-Link: https://github.com/googleapis/googleapis/commit/e0f9d9e1f9de890db765be46f45ca8490723e3eb --- .../video_intelligence_service_client.py | 18 +- .../proto/video_intelligence_pb2.py | 613 +++++++------ .../video_intelligence_service_client.py | 26 +- .../proto/video_intelligence_pb2.py | 297 +++---- .../video_intelligence_service_client.py | 26 +- .../proto/video_intelligence_pb2.py | 329 ++++--- .../video_intelligence_service_client.py | 26 +- .../proto/video_intelligence_pb2.py | 341 ++++---- ...ideo_intelligence_service_client_config.py | 2 +- .../video_intelligence_service_client.py | 26 +- .../proto/video_intelligence_pb2.py | 821 +++++++++--------- synth.metadata | 30 +- ...deo_intelligence_service_client_v1beta2.py | 10 +- ...o_intelligence_service_client_v1p1beta1.py | 10 +- ...o_intelligence_service_client_v1p2beta1.py | 10 +- ...o_intelligence_service_client_v1p3beta1.py | 10 +- 16 files changed, 1263 insertions(+), 1332 deletions(-) diff --git a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py index 3b92add2..39583518 100644 --- a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py @@ -232,6 +232,7 @@ def annotate_video( >>> metadata = response.metadata() Args: + features (list[~google.cloud.videointelligence_v1.types.Feature]): 
Required. Requested video annotation features. input_uri (str): Input video location. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` @@ -239,19 +240,18 @@ def annotate_video( more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '\*' to match 0 or more - characters; '?' to match 1 character. If unset, the input video should - be embedded in the request as ``input_content``. If set, - ``input_content`` should be unset. - input_content (bytes): The video data bytes. If unset, the input video(s) should be specified - via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1.types.Feature]): Required. Requested video annotation features. + multiple videos. Supported wildcards: '*' to match 0 or more characters; + '?' to match 1 character. If unset, the input video should be embedded + in the request as ``input_content``. If set, ``input_content`` should be + unset. + input_content (bytes): The video data bytes. If unset, the input video(s) should be + specified via ``input_uri``. If set, ``input_uri`` should be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1.types.VideoContext` - output_uri (str): Optional. Location where the output (in JSON format) should be stored. - Currently, only `Google Cloud + output_uri (str): Optional. Location where the output (in JSON format) should be + stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). 
For diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index ceea9d8a..f5d356d3 100644 --- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/videointelligence_v1/proto/video_intelligence.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -31,12 +28,8 @@ name="google/cloud/videointelligence_v1/proto/video_intelligence.proto", package="google.cloud.videointelligence.v1", syntax="proto3", - serialized_options=_b( - "\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1" - ), - serialized_pb=_b( - '\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 
\x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xe6\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x98\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa3\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 
\x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xa1\t\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12K\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 
\x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xdf\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3' - ), + serialized_options=b"\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1", + serialized_pb=b'\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 
\x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xe6\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 
\x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x98\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa3\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 
\x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xa1\t\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12K\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 
\x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xdf\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -227,7 +220,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -245,7 +238,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -269,7 +262,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -299,13 +292,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -317,13 +310,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -557,7 +550,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -632,7 +625,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -671,7 +664,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -710,7 +703,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -767,7 +760,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -824,7 +817,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1034,7 +1027,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1052,7 +1045,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + 
default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1070,7 +1063,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1487,7 +1480,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1604,7 +1597,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1622,7 +1615,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -1697,7 +1690,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1715,7 +1708,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -1748,7 +1741,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1784,7 +1777,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1823,7 +1816,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1898,7 +1891,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), 
message_type=None, enum_type=None, containing_type=None, @@ -2228,7 +2221,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2396,13 +2389,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2420,7 +2413,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2438,7 +2431,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2456,7 +2449,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2474,7 +2467,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2492,7 +2485,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2510,7 +2503,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2528,7 +2521,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), 
+ serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2546,7 +2539,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -2585,7 +2578,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ) ], @@ -2636,13 +2629,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -2675,7 +2668,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2699,7 +2692,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2717,7 +2710,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -2786,7 +2779,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2810,7 +2803,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2828,7 +2821,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -3089,7 +3082,7 @@ 
cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3604,10 +3597,10 @@ AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "AnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video annotation request. + { + "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video annotation request. Attributes: @@ -3616,12 +3609,12 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: '\*' to match 0 - or more characters; '?' to match 1 character. If unset, the + identify multiple videos. Supported wildcards: ’*’ to match 0 + or more characters; ‘?’ to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content: @@ -3638,8 +3631,8 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. 
@@ -3648,17 +3641,17 @@ will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoRequest) VideoContext = _reflection.GeneratedProtocolMessageType( "VideoContext", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCONTEXT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video context and/or feature-specific parameters. + { + "DESCRIPTOR": _VIDEOCONTEXT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video context and/or feature-specific parameters. Attributes: @@ -3667,47 +3660,47 @@ not required to be contiguous or span the whole video. If unspecified, each video is treated as a single segment. label_detection_config: - Config for LABEL\_DETECTION. + Config for LABEL_DETECTION. shot_change_detection_config: - Config for SHOT\_CHANGE\_DETECTION. + Config for SHOT_CHANGE_DETECTION. explicit_content_detection_config: - Config for EXPLICIT\_CONTENT\_DETECTION. + Config for EXPLICIT_CONTENT_DETECTION. face_detection_config: - Config for FACE\_DETECTION. + Config for FACE_DETECTION. speech_transcription_config: - Config for SPEECH\_TRANSCRIPTION. + Config for SPEECH_TRANSCRIPTION. text_detection_config: - Config for TEXT\_DETECTION. + Config for TEXT_DETECTION. object_tracking_config: - Config for OBJECT\_TRACKING. + Config for OBJECT_TRACKING. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoContext) - ), + }, ) _sym_db.RegisterMessage(VideoContext) LabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "LabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_LABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for LABEL\_DETECTION. 
+ { + "DESCRIPTOR": _LABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for LABEL_DETECTION. Attributes: label_detection_mode: - What labels should be detected with LABEL\_DETECTION, in + What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e. non- moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with ``SHOT_AND_FRAME_MODE`` enabled. model: Model to use for label detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. frame_confidence_threshold: The confidence threshold we perform filtering on the labels from frame-level detection. If not set, it is set to 0.4 by @@ -3726,96 +3719,96 @@ we release a new model. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(LabelDetectionConfig) ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "ShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for SHOT_CHANGE_DETECTION. Attributes: model: Model to use for shot change detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) ObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( "ObjectTrackingConfig", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for OBJECT\_TRACKING. + { + "DESCRIPTOR": _OBJECTTRACKINGCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for OBJECT_TRACKING. Attributes: model: Model to use for object tracking. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingConfig) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingConfig) FaceDetectionConfig = _reflection.GeneratedProtocolMessageType( "FaceDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_FACEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for FACE\_DETECTION. + { + "DESCRIPTOR": _FACEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for FACE_DETECTION. Attributes: model: Model to use for face detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. include_bounding_boxes: Whether bounding boxes be included in the face annotation output. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(FaceDetectionConfig) ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "ExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. Attributes: model: Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) TextDetectionConfig = _reflection.GeneratedProtocolMessageType( "TextDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_TEXTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for TEXT\_DETECTION. + { + "DESCRIPTOR": _TEXTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for TEXT_DETECTION. Attributes: @@ -3827,20 +3820,20 @@ is provided. model: Model to use for text detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(TextDetectionConfig) VideoSegment = _reflection.GeneratedProtocolMessageType( "VideoSegment", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOSEGMENT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video segment. + { + "DESCRIPTOR": _VIDEOSEGMENT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video segment. Attributes: @@ -3852,17 +3845,17 @@ corresponding to the end of the segment (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoSegment) - ), + }, ) _sym_db.RegisterMessage(VideoSegment) LabelSegment = _reflection.GeneratedProtocolMessageType( "LabelSegment", (_message.Message,), - dict( - DESCRIPTOR=_LABELSEGMENT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label + { + "DESCRIPTOR": _LABELSEGMENT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for label detection. @@ -3873,17 +3866,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelSegment) - ), + }, ) _sym_db.RegisterMessage(LabelSegment) LabelFrame = _reflection.GeneratedProtocolMessageType( "LabelFrame", (_message.Message,), - dict( - DESCRIPTOR=_LABELFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for label detection. + { + "DESCRIPTOR": _LABELFRAME, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for label detection. Attributes: @@ -3894,17 +3887,17 @@ Confidence that the label is accurate. 
Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelFrame) - ), + }, ) _sym_db.RegisterMessage(LabelFrame) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Detected entity from video analysis. + { + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Detected entity from video analysis. Attributes: @@ -3918,17 +3911,17 @@ Language code for ``description`` in BCP-47 format. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) LabelAnnotation = _reflection.GeneratedProtocolMessageType( "LabelAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LABELANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Label annotation. + { + "DESCRIPTOR": _LABELANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Label annotation. Attributes: @@ -3945,17 +3938,17 @@ All video frames where a label was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LabelAnnotation) - ), + }, ) _sym_db.RegisterMessage(LabelAnnotation) ExplicitContentFrame = _reflection.GeneratedProtocolMessageType( "ExplicitContentFrame", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for explicit content. + { + "DESCRIPTOR": _EXPLICITCONTENTFRAME, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for explicit content. Attributes: @@ -3966,17 +3959,17 @@ Likelihood of the pornography content.. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentFrame) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentFrame) ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType( "ExplicitContentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual + { + "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. @@ -3986,17 +3979,17 @@ All video frames where explicit content was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentAnnotation) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentAnnotation) NormalizedBoundingBox = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingBox", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. @@ -4011,17 +4004,17 @@ Bottom Y coordinate. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedBoundingBox) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingBox) FaceSegment = _reflection.GeneratedProtocolMessageType( "FaceSegment", (_message.Message,), - dict( - DESCRIPTOR=_FACESEGMENT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for face detection. + { + "DESCRIPTOR": _FACESEGMENT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for face detection. Attributes: @@ -4029,17 +4022,17 @@ Video segment where a face was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceSegment) - ), + }, ) _sym_db.RegisterMessage(FaceSegment) FaceFrame = _reflection.GeneratedProtocolMessageType( "FaceFrame", (_message.Message,), - dict( - DESCRIPTOR=_FACEFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for face detection. + { + "DESCRIPTOR": _FACEFRAME, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for face detection. Attributes: @@ -4052,17 +4045,17 @@ corresponding to the video frame for this location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceFrame) - ), + }, ) _sym_db.RegisterMessage(FaceFrame) FaceAnnotation = _reflection.GeneratedProtocolMessageType( "FaceAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_FACEANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Face annotation. + { + "DESCRIPTOR": _FACEANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Face annotation. 
Attributes: @@ -4074,18 +4067,18 @@ All video frames where a face was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceAnnotation) - ), + }, ) _sym_db.RegisterMessage(FaceAnnotation) TimestampedObject = _reflection.GeneratedProtocolMessageType( "TimestampedObject", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPEDOBJECT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""For tracking related features. An object at time\_offset - with attributes, and located with normalized\_bounding\_box. + { + "DESCRIPTOR": _TIMESTAMPEDOBJECT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """For tracking related features. An object at time_offset + with attributes, and located with normalized_bounding_box. Attributes: @@ -4101,17 +4094,17 @@ Optional. The detected landmarks. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TimestampedObject) - ), + }, ) _sym_db.RegisterMessage(TimestampedObject) Track = _reflection.GeneratedProtocolMessageType( "Track", (_message.Message,), - dict( - DESCRIPTOR=_TRACK, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""A track of an object instance. + { + "DESCRIPTOR": _TRACK, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """A track of an object instance. Attributes: @@ -4126,49 +4119,49 @@ Optional. The confidence score of the tracked object. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.Track) - ), + }, ) _sym_db.RegisterMessage(Track) DetectedAttribute = _reflection.GeneratedProtocolMessageType( "DetectedAttribute", (_message.Message,), - dict( - DESCRIPTOR=_DETECTEDATTRIBUTE, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""A generic detected attribute represented by name in string + { + "DESCRIPTOR": _DETECTEDATTRIBUTE, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """A generic detected attribute represented by name in string format. Attributes: name: - The name of the attribute, i.e. glasses, dark\_glasses, - mouth\_open etc. A full list of supported type names will be + The name of the attribute, i.e. glasses, dark_glasses, + mouth_open etc. A full list of supported type names will be provided in the document. confidence: Detected attribute confidence. Range [0, 1]. value: Text value of the detection result. For example, the value for - "HairColor" can be "black", "blonde", etc. + “HairColor” can be “black”, “blonde”, etc. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.DetectedAttribute) - ), + }, ) _sym_db.RegisterMessage(DetectedAttribute) DetectedLandmark = _reflection.GeneratedProtocolMessageType( "DetectedLandmark", (_message.Message,), - dict( - DESCRIPTOR=_DETECTEDLANDMARK, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""A generic detected landmark represented by name in string + { + "DESCRIPTOR": _DETECTEDLANDMARK, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """A generic detected landmark represented by name in string format and a 2D location. Attributes: name: - The name of this landmark, i.e. left\_hand, right\_shoulder. + The name of this landmark, i.e. left_hand, right_shoulder. 
point: The 2D point of the detected landmark using the normalized image coordindate system. The normalized coordinates have the @@ -4177,17 +4170,17 @@ The confidence score of the detected landmark. Range [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.DetectedLandmark) - ), + }, ) _sym_db.RegisterMessage(DetectedLandmark) VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotation results for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Annotation results for a single video. Attributes: @@ -4207,7 +4200,7 @@ ``segment_label_annotations``, this field presents more fine- grained, segment-level labels detected in video content and is made available only when the client sets - ``LabelDetectionConfig.model`` to "builtin/latest" in the + ``LabelDetectionConfig.model`` to “builtin/latest” in the request. shot_label_annotations: Topical label annotations on shot level. There is exactly one @@ -4218,7 +4211,7 @@ topical ``shot_label_annotations``, this field presents more fine-grained, shot-level labels detected in video content and is made available only when the client sets - ``LabelDetectionConfig.model`` to "builtin/latest" in the + ``LabelDetectionConfig.model`` to “builtin/latest” in the request. frame_label_annotations: Label annotations on frame level. There is exactly one element @@ -4247,17 +4240,17 @@ fail. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationResults) AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "AnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` + { + "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -4268,17 +4261,17 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoResponse) VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType( "VideoAnnotationProgress", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONPROGRESS, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotation progress for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Annotation progress for a single video. Attributes: @@ -4300,17 +4293,17 @@ contains more than one segments. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationProgress) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationProgress) AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType( "AnnotateVideoProgress", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. 
Included in the ``metadata`` + { + "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -4321,24 +4314,24 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.AnnotateVideoProgress) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoProgress) SpeechTranscriptionConfig = _reflection.GeneratedProtocolMessageType( "SpeechTranscriptionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for SPEECH\_TRANSCRIPTION. + { + "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Config for SPEECH_TRANSCRIPTION. Attributes: language_code: Required. *Required* The language of the supplied audio as a `BCP-47 `__ - language tag. Example: "en-US". See `Language Support + language tag. Example: “en-US”. See `Language Support `__ for a list of the currently supported language codes. max_alternatives: @@ -4352,29 +4345,29 @@ filter_profanity: Optional. If set to ``true``, the server will attempt to filter out profanities, replacing all but the initial - character in each filtered word with asterisks, e.g. - "f\*\*\*". If set to ``false`` or omitted, profanities won't - be filtered out. + character in each filtered word with asterisks, e.g. "f***". + If set to ``false`` or omitted, profanities won’t be filtered + out. speech_contexts: Optional. A means to provide context to assist the speech recognition. enable_automatic_punctuation: - Optional. If 'true', adds punctuation to recognition result + Optional. If ‘true’, adds punctuation to recognition result hypotheses. 
This feature is only available in select languages. Setting this for requests in other languages has no - effect at all. The default 'false' value does not add - punctuation to result hypotheses. NOTE: "This is currently + effect at all. The default ‘false’ value does not add + punctuation to result hypotheses. NOTE: “This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a - premium feature." + premium feature.” audio_tracks: Optional. For file formats, such as MXF or MKV, supporting multiple audio tracks, specify up to two tracks. Default: track 0. enable_speaker_diarization: - Optional. If 'true', enables speaker detection for each + Optional. If ‘true’, enables speaker detection for each recognized word in the top alternative of the recognition - result using a speaker\_tag provided in the WordInfo. Note: + result using a speaker_tag provided in the WordInfo. Note: When this is true, we send all the words from the beginning of the audio for the top alternative in every consecutive responses. This is done in order to improve our speaker tags @@ -4382,8 +4375,8 @@ conversation over time. diarization_speaker_count: Optional. If set, specifies the estimated number of speakers - in the conversation. If not set, defaults to '2'. Ignored - unless enable\_speaker\_diarization is set to true. + in the conversation. If not set, defaults to ‘2’. Ignored + unless enable_speaker_diarization is set to true. enable_word_confidence: Optional. If ``true``, the top result includes a list of words and the confidence for those words. If ``false``, no word- @@ -4391,24 +4384,24 @@ ``false``. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechTranscriptionConfig) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscriptionConfig) SpeechContext = _reflection.GeneratedProtocolMessageType( "SpeechContext", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHCONTEXT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor specific words and - phrases in the results. + { + "DESCRIPTOR": _SPEECHCONTEXT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Provides “hints” to the speech recognizer to favor + specific words and phrases in the results. Attributes: phrases: Optional. A list of strings containing words and phrases - "hints" so that the speech recognition is more likely to + “hints” so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add @@ -4417,17 +4410,17 @@ `__. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechContext) - ), + }, ) _sym_db.RegisterMessage(SpeechContext) SpeechTranscription = _reflection.GeneratedProtocolMessageType( "SpeechTranscription", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of + { + "DESCRIPTOR": _SPEECHTRANSCRIPTION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """A speech recognition result corresponding to a portion of the audio. @@ -4445,17 +4438,17 @@ most likelihood of being spoken in the audio. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechTranscription) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscription) SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType( "SpeechRecognitionAlternative", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHRECOGNITIONALTERNATIVE, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Alternative hypotheses (a.k.a. n-best list). + { + "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Alternative hypotheses (a.k.a. n-best list). Attributes: @@ -4476,17 +4469,17 @@ audio. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechRecognitionAlternative) - ), + }, ) _sym_db.RegisterMessage(SpeechRecognitionAlternative) WordInfo = _reflection.GeneratedProtocolMessageType( "WordInfo", (_message.Message,), - dict( - DESCRIPTOR=_WORDINFO, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. Word + { + "DESCRIPTOR": _WORDINFO, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. @@ -4518,21 +4511,21 @@ Output only. A distinct integer value is assigned for every speaker within the audio. This field specifies which one of those speakers was detected to have spoken this word. Value - ranges from 1 up to diarization\_speaker\_count, and is only - set if speaker diarization is enabled. + ranges from 1 up to diarization_speaker_count, and is only set + if speaker diarization is enabled. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.WordInfo) - ), + }, ) _sym_db.RegisterMessage(WordInfo) NormalizedVertex = _reflection.GeneratedProtocolMessageType( "NormalizedVertex", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDVERTEX, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""X coordinate. + { + "DESCRIPTOR": _NORMALIZEDVERTEX, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """X coordinate. Attributes: @@ -4540,24 +4533,24 @@ Y coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedVertex) - ), + }, ) _sym_db.RegisterMessage(NormalizedVertex) NormalizedBoundingPoly = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingPoly", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0----1 \| - \| 3----2 + bounding box: When the text is horizontal it might look like: 0—-1 \| \| + 3—-2 - When it's clockwise rotated 180 degrees around the top-left corner it - becomes: 2----3 \| \| 1----0 + When it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trignometric calculations for @@ -4569,17 +4562,17 @@ Normalized vertices of the bounding polygon. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedBoundingPoly) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingPoly) TextSegment = _reflection.GeneratedProtocolMessageType( "TextSegment", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSEGMENT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for text detection. + { + "DESCRIPTOR": _TEXTSEGMENT, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for text detection. Attributes: @@ -4593,17 +4586,17 @@ appears. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextSegment) - ), + }, ) _sym_db.RegisterMessage(TextSegment) TextFrame = _reflection.GeneratedProtocolMessageType( "TextFrame", (_message.Message,), - dict( - DESCRIPTOR=_TEXTFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation + { + "DESCRIPTOR": _TEXTFRAME, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. @@ -4615,17 +4608,17 @@ Timestamp of this frame. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextFrame) - ), + }, ) _sym_db.RegisterMessage(TextFrame) TextAnnotation = _reflection.GeneratedProtocolMessageType( "TextAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotations related to one detected OCR text snippet. 
This + { + "DESCRIPTOR": _TEXTANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. @@ -4637,17 +4630,17 @@ All video segments where OCR detected text appears. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextAnnotation) ObjectTrackingFrame = _reflection.GeneratedProtocolMessageType( "ObjectTrackingFrame", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and + { + "DESCRIPTOR": _OBJECTTRACKINGFRAME, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. @@ -4660,17 +4653,17 @@ The timestamp of the frame in microseconds. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingFrame) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingFrame) ObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( "ObjectTrackingAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotations corresponding to one tracked object. + { + "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Annotations corresponding to one tracked object. Attributes: @@ -4684,14 +4677,14 @@ Streaming mode ONLY. In streaming mode, we do not know the end time of a tracked object before it is completed. 
Hence, there is no VideoSegment info returned. Instead, we provide a unique - identifiable integer track\_id so that the customers can + identifiable integer track_id so that the customers can correlate the results of the ongoing ObjectTrackAnnotation of - the same track\_id over time. + the same track_id over time. entity: Entity to specify the object category that this track is labeled as. confidence: - Object category's labeling confidence of this track. + Object category’s labeling confidence of this track. frames: Information corresponding to all frames where this object track appears. Non-streaming batch mode: it may be one or @@ -4700,17 +4693,17 @@ frames. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingAnnotation) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingAnnotation) LogoRecognitionAnnotation = _reflection.GeneratedProtocolMessageType( "LogoRecognitionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LOGORECOGNITIONANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotation corresponding to one detected, tracked and + { + "DESCRIPTOR": _LOGORECOGNITIONANNOTATION, + "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", + "__doc__": """Annotation corresponding to one detected, tracked and recognized logo class. @@ -4729,7 +4722,7 @@ in one VideoSegment. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.LogoRecognitionAnnotation) - ), + }, ) _sym_db.RegisterMessage(LogoRecognitionAnnotation) @@ -4765,9 +4758,7 @@ full_name="google.cloud.videointelligence.v1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=8239, serialized_end=8559, methods=[ @@ -4778,9 +4769,7 @@ containing_service=None, input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002\030"\023/v1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' - ), + serialized_options=b'\202\323\344\223\002\030"\023/v1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', ) ], ) diff --git a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py index 72fd9a1f..a467f189 100644 --- a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py @@ -191,9 +191,9 @@ def __init__( # Service calls def annotate_video( self, + features, input_uri=None, input_content=None, - features=None, video_context=None, output_uri=None, location_id=None, @@ -213,11 +213,11 @@ def annotate_video( >>> >>> client = videointelligence_v1beta2.VideoIntelligenceServiceClient() >>> - >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] + >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> - 
>>> response = client.annotate_video(input_uri=input_uri, features=features) + >>> response = client.annotate_video(features, input_uri=input_uri) >>> >>> def callback(operation_future): ... # Handle result. @@ -229,6 +229,7 @@ def annotate_video( >>> metadata = response.metadata() Args: + features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Required. Requested video annotation features. input_uri (str): Input video location. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` @@ -236,19 +237,18 @@ def annotate_video( more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '\*' to match 0 or more - characters; '?' to match 1 character. If unset, the input video should - be embedded in the request as ``input_content``. If set, - ``input_content`` should be unset. - input_content (bytes): The video data bytes. If unset, the input video(s) should be specified - via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Required. Requested video annotation features. + multiple videos. Supported wildcards: '*' to match 0 or more characters; + '?' to match 1 character. If unset, the input video should be embedded + in the request as ``input_content``. If set, ``input_content`` should be + unset. + input_content (bytes): The video data bytes. If unset, the input video(s) should be + specified via ``input_uri``. If set, ``input_uri`` should be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1beta2.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1beta2.types.VideoContext` - output_uri (str): Optional. 
Location where the output (in JSON format) should be stored. - Currently, only `Google Cloud + output_uri (str): Optional. Location where the output (in JSON format) should be + stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For @@ -289,9 +289,9 @@ def annotate_video( ) request = video_intelligence_pb2.AnnotateVideoRequest( + features=features, input_uri=input_uri, input_content=input_content, - features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index d72ea1e2..6cee1805 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -31,12 +28,8 @@ name="google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto", package="google.cloud.videointelligence.v1beta2", syntax="proto3", - serialized_options=_b( - "\n*com.google.cloud.videointelligence.v1beta2B\035VideoIntelligenceServiceProtoP\001ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\252\002&Google.Cloud.VideoIntelligence.V1Beta2\312\002&Google\\Cloud\\VideoIntelligence\\V1beta2\352\002)Google::Cloud::VideoIntelligence::V1beta2" - ), - serialized_pb=_b( - '\nEgoogle/cloud/videointelligence_v1beta2/proto/video_intelligence.proto\x12&google.cloud.videointelligence.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x88\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x46\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32/.google.cloud.videointelligence.v1beta2.FeatureB\x03\xe0\x41\x02\x12K\n\rvideo_context\x18\x03 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xec\x03\n\x0cVideoContext\x12\x46\n\x08segments\x18\x01 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\\\n\x16label_detection_config\x18\x02 \x01(\x0b\x32<.google.cloud.videointelligence.v1beta2.LabelDetectionConfig\x12g\n\x1cshot_change_detection_config\x18\x03 
\x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig\x12q\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x46.google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig\x12Z\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32;.google.cloud.videointelligence.v1beta2.FaceDetectionConfig"\x9a\x01\n\x14LabelDetectionConfig\x12X\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1beta2.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"i\n\x0cLabelSegment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xa8\x02\n\x0fLabelAnnotation\x12>\n\x06\x65ntity\x18\x01 \x01(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12I\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12\x46\n\x08segments\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.LabelSegment\x12\x42\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1beta2.LabelFrame"\x9a\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12R\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x32.google.cloud.videointelligence.v1beta2.Likelihood"i\n\x19\x45xplicitContentAnnotation\x12L\n\x06\x66rames\x18\x01 \x03(\x0b\x32<.google.cloud.videointelligence.v1beta2.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"T\n\x0b\x46\x61\x63\x65Segment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment"\x9d\x01\n\tFaceFrame\x12`\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32=.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12\x45\n\x08segments\x18\x02 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1beta2.FaceSegment\x12\x41\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1beta2.FaceFrame"\xdf\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12Z\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12W\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12X\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12P\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1beta2.FaceAnnotation\x12N\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12^\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"s\n\x15\x41nnotateVideoResponse\x12Z\n\x12\x61nnotation_results\x18\x01 
\x03(\x0b\x32>.google.cloud.videointelligence.v1beta2.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"u\n\x15\x41nnotateVideoProgress\x12\\\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1beta2.VideoAnnotationProgress*\x86\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xca\x02\n\x18VideoIntelligenceService\x12\xd7\x01\n\rAnnotateVideo\x12<.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"i\x82\xd3\xe4\x93\x02\x1d"\x18/v1beta2/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xa4\x02\n*com.google.cloud.videointelligence.v1beta2B\x1dVideoIntelligenceServiceProtoP\x01ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\xaa\x02&Google.Cloud.VideoIntelligence.V1Beta2\xca\x02&Google\\Cloud\\VideoIntelligence\\V1beta2\xea\x02)Google::Cloud::VideoIntelligence::V1beta2b\x06proto3' - ), + 
serialized_options=b"\n*com.google.cloud.videointelligence.v1beta2B\035VideoIntelligenceServiceProtoP\001ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\252\002&Google.Cloud.VideoIntelligence.V1Beta2\312\002&Google\\Cloud\\VideoIntelligence\\V1beta2\352\002)Google::Cloud::VideoIntelligence::V1beta2", + serialized_pb=b'\nEgoogle/cloud/videointelligence_v1beta2/proto/video_intelligence.proto\x12&google.cloud.videointelligence.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x88\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x46\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32/.google.cloud.videointelligence.v1beta2.FeatureB\x03\xe0\x41\x02\x12K\n\rvideo_context\x18\x03 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xec\x03\n\x0cVideoContext\x12\x46\n\x08segments\x18\x01 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\\\n\x16label_detection_config\x18\x02 \x01(\x0b\x32<.google.cloud.videointelligence.v1beta2.LabelDetectionConfig\x12g\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig\x12q\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x46.google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig\x12Z\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32;.google.cloud.videointelligence.v1beta2.FaceDetectionConfig"\x9a\x01\n\x14LabelDetectionConfig\x12X\n\x14label_detection_mode\x18\x01 
\x01(\x0e\x32:.google.cloud.videointelligence.v1beta2.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"i\n\x0cLabelSegment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xa8\x02\n\x0fLabelAnnotation\x12>\n\x06\x65ntity\x18\x01 \x01(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12I\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12\x46\n\x08segments\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.LabelSegment\x12\x42\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1beta2.LabelFrame"\x9a\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12R\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x32.google.cloud.videointelligence.v1beta2.Likelihood"i\n\x19\x45xplicitContentAnnotation\x12L\n\x06\x66rames\x18\x01 \x03(\x0b\x32<.google.cloud.videointelligence.v1beta2.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 
\x01(\x02"T\n\x0b\x46\x61\x63\x65Segment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment"\x9d\x01\n\tFaceFrame\x12`\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32=.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12\x45\n\x08segments\x18\x02 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1beta2.FaceSegment\x12\x41\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1beta2.FaceFrame"\xdf\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12Z\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12W\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12X\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12P\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1beta2.FaceAnnotation\x12N\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12^\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"s\n\x15\x41nnotateVideoResponse\x12Z\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1beta2.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"u\n\x15\x41nnotateVideoProgress\x12\\\n\x13\x61nnotation_progress\x18\x01 
\x03(\x0b\x32?.google.cloud.videointelligence.v1beta2.VideoAnnotationProgress*\x86\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xca\x02\n\x18VideoIntelligenceService\x12\xd7\x01\n\rAnnotateVideo\x12<.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"i\x82\xd3\xe4\x93\x02\x1d"\x18/v1beta2/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xa4\x02\n*com.google.cloud.videointelligence.v1beta2B\x1dVideoIntelligenceServiceProtoP\x01ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\xaa\x02&Google.Cloud.VideoIntelligence.V1Beta2\xca\x02&Google\\Cloud\\VideoIntelligence\\V1beta2\xea\x02)Google::Cloud::VideoIntelligence::V1beta2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -199,7 +192,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -217,7 +210,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -241,7 +234,7 @@ 
containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -271,13 +264,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -289,13 +282,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -475,7 +468,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -514,7 +507,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -553,7 +546,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -592,7 +585,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -820,7 +813,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -838,7 +831,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + 
default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -856,7 +849,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1273,7 +1266,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1348,7 +1341,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1552,7 +1545,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1779,10 +1772,10 @@ AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "AnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video annotation request. + { + "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video annotation request. Attributes: @@ -1791,12 +1784,12 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: '\*' to match 0 - or more characters; '?' to match 1 character. If unset, the + identify multiple videos. 
Supported wildcards: ’*’ to match 0 + or more characters; ‘?’ to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content: @@ -1813,8 +1806,8 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. @@ -1823,17 +1816,17 @@ will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.AnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoRequest) VideoContext = _reflection.GeneratedProtocolMessageType( "VideoContext", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCONTEXT, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video context and/or feature-specific parameters. + { + "DESCRIPTOR": _VIDEOCONTEXT, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video context and/or feature-specific parameters. Attributes: @@ -1842,114 +1835,114 @@ not required to be contiguous or span the whole video. If unspecified, each video is treated as a single segment. label_detection_config: - Config for LABEL\_DETECTION. + Config for LABEL_DETECTION. shot_change_detection_config: - Config for SHOT\_CHANGE\_DETECTION. + Config for SHOT_CHANGE_DETECTION. explicit_content_detection_config: - Config for EXPLICIT\_CONTENT\_DETECTION. + Config for EXPLICIT_CONTENT_DETECTION. face_detection_config: - Config for FACE\_DETECTION. + Config for FACE_DETECTION. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.VideoContext) - ), + }, ) _sym_db.RegisterMessage(VideoContext) LabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "LabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_LABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Config for LABEL\_DETECTION. + { + "DESCRIPTOR": _LABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Config for LABEL_DETECTION. Attributes: label_detection_mode: - What labels should be detected with LABEL\_DETECTION, in + What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e. non- moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with ``SHOT_AND_FRAME_MODE`` enabled. model: Model to use for label detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.LabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(LabelDetectionConfig) ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "ShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Config for SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Config for SHOT_CHANGE_DETECTION. 
Attributes: model: Model to use for shot change detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "ExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. Attributes: model: Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) FaceDetectionConfig = _reflection.GeneratedProtocolMessageType( "FaceDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_FACEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Config for FACE\_DETECTION. + { + "DESCRIPTOR": _FACEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Config for FACE_DETECTION. Attributes: model: Model to use for face detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. 
include_bounding_boxes: Whether bounding boxes be included in the face annotation output. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.FaceDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(FaceDetectionConfig) VideoSegment = _reflection.GeneratedProtocolMessageType( "VideoSegment", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOSEGMENT, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video segment. + { + "DESCRIPTOR": _VIDEOSEGMENT, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video segment. Attributes: @@ -1961,17 +1954,17 @@ corresponding to the end of the segment (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.VideoSegment) - ), + }, ) _sym_db.RegisterMessage(VideoSegment) LabelSegment = _reflection.GeneratedProtocolMessageType( "LabelSegment", (_message.Message,), - dict( - DESCRIPTOR=_LABELSEGMENT, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label + { + "DESCRIPTOR": _LABELSEGMENT, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for label detection. @@ -1982,17 +1975,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.LabelSegment) - ), + }, ) _sym_db.RegisterMessage(LabelSegment) LabelFrame = _reflection.GeneratedProtocolMessageType( "LabelFrame", (_message.Message,), - dict( - DESCRIPTOR=_LABELFRAME, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for label detection. 
+ { + "DESCRIPTOR": _LABELFRAME, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for label detection. Attributes: @@ -2003,17 +1996,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.LabelFrame) - ), + }, ) _sym_db.RegisterMessage(LabelFrame) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Detected entity from video analysis. + { + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Detected entity from video analysis. Attributes: @@ -2027,17 +2020,17 @@ Language code for ``description`` in BCP-47 format. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) LabelAnnotation = _reflection.GeneratedProtocolMessageType( "LabelAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LABELANNOTATION, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Label annotation. + { + "DESCRIPTOR": _LABELANNOTATION, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Label annotation. Attributes: @@ -2054,17 +2047,17 @@ All video frames where a label was detected. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.LabelAnnotation) - ), + }, ) _sym_db.RegisterMessage(LabelAnnotation) ExplicitContentFrame = _reflection.GeneratedProtocolMessageType( "ExplicitContentFrame", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTFRAME, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for explicit content. + { + "DESCRIPTOR": _EXPLICITCONTENTFRAME, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for explicit content. Attributes: @@ -2075,17 +2068,17 @@ Likelihood of the pornography content.. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.ExplicitContentFrame) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentFrame) ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType( "ExplicitContentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTANNOTATION, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual + { + "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. @@ -2095,18 +2088,18 @@ All video frames where explicit content was detected. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentAnnotation) NormalizedBoundingBox = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingBox", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates - are relative to the original image. Range: [0, 1]. + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative + to the original image. Range: [0, 1]. Attributes: @@ -2120,17 +2113,17 @@ Bottom Y coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.NormalizedBoundingBox) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingBox) FaceSegment = _reflection.GeneratedProtocolMessageType( "FaceSegment", (_message.Message,), - dict( - DESCRIPTOR=_FACESEGMENT, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for face detection. + { + "DESCRIPTOR": _FACESEGMENT, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for face detection. Attributes: @@ -2138,17 +2131,17 @@ Video segment where a face was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.FaceSegment) - ), + }, ) _sym_db.RegisterMessage(FaceSegment) FaceFrame = _reflection.GeneratedProtocolMessageType( "FaceFrame", (_message.Message,), - dict( - DESCRIPTOR=_FACEFRAME, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for face detection. 
+ { + "DESCRIPTOR": _FACEFRAME, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for face detection. Attributes: @@ -2161,17 +2154,17 @@ corresponding to the video frame for this location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.FaceFrame) - ), + }, ) _sym_db.RegisterMessage(FaceFrame) FaceAnnotation = _reflection.GeneratedProtocolMessageType( "FaceAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_FACEANNOTATION, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Face annotation. + { + "DESCRIPTOR": _FACEANNOTATION, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Face annotation. Attributes: @@ -2183,17 +2176,17 @@ All video frames where a face was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.FaceAnnotation) - ), + }, ) _sym_db.RegisterMessage(FaceAnnotation) VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Annotation results for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Annotation results for a single video. Attributes: @@ -2222,17 +2215,17 @@ fail. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.VideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationResults) AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "AnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` + { + "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2243,17 +2236,17 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.AnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoResponse) VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType( "VideoAnnotationProgress", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONPROGRESS, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Annotation progress for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Annotation progress for a single video. Attributes: @@ -2269,17 +2262,17 @@ Time of the most recent update. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.VideoAnnotationProgress) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationProgress) AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType( "AnnotateVideoProgress", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, - __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` + { + "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, + "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2290,7 +2283,7 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1beta2.AnnotateVideoProgress) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoProgress) @@ -2305,9 +2298,7 @@ full_name="google.cloud.videointelligence.v1beta2.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=4250, serialized_end=4580, methods=[ @@ -2318,9 +2309,7 @@ containing_service=None, input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002\035"\030/v1beta2/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' - ), + serialized_options=b'\202\323\344\223\002\035"\030/v1beta2/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', ) ], ) diff --git 
a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py index c46c50f8..d752216e 100644 --- a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py @@ -193,9 +193,9 @@ def __init__( # Service calls def annotate_video( self, + features, input_uri=None, input_content=None, - features=None, video_context=None, output_uri=None, location_id=None, @@ -215,11 +215,11 @@ def annotate_video( >>> >>> client = videointelligence_v1p1beta1.VideoIntelligenceServiceClient() >>> - >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] + >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> - >>> response = client.annotate_video(input_uri=input_uri, features=features) + >>> response = client.annotate_video(features, input_uri=input_uri) >>> >>> def callback(operation_future): ... # Handle result. @@ -231,6 +231,7 @@ def annotate_video( >>> metadata = response.metadata() Args: + features (list[~google.cloud.videointelligence_v1p1beta1.types.Feature]): Required. Requested video annotation features. input_uri (str): Input video location. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` @@ -238,19 +239,18 @@ def annotate_video( more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '\*' to match 0 or more - characters; '?' to match 1 character. If unset, the input video should - be embedded in the request as ``input_content``. If set, - ``input_content`` should be unset. - input_content (bytes): The video data bytes. 
If unset, the input video(s) should be specified - via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1p1beta1.types.Feature]): Required. Requested video annotation features. + multiple videos. Supported wildcards: '*' to match 0 or more characters; + '?' to match 1 character. If unset, the input video should be embedded + in the request as ``input_content``. If set, ``input_content`` should be + unset. + input_content (bytes): The video data bytes. If unset, the input video(s) should be + specified via ``input_uri``. If set, ``input_uri`` should be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1p1beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p1beta1.types.VideoContext` - output_uri (str): Optional. Location where the output (in JSON format) should be stored. - Currently, only `Google Cloud + output_uri (str): Optional. Location where the output (in JSON format) should be + stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). 
For @@ -291,9 +291,9 @@ def annotate_video( ) request = video_intelligence_pb2.AnnotateVideoRequest( + features=features, input_uri=input_uri, input_content=input_content, - features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py index 939c5950..688dfc9c 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -31,12 +28,8 @@ name="google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto", package="google.cloud.videointelligence.v1p1beta1", syntax="proto3", - serialized_options=_b( - "\n,com.google.cloud.videointelligence.v1p1beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P1Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p1beta1\352\002+Google::Cloud::VideoIntelligence::V1p1beta1" - ), - serialized_pb=_b( - 
'\nGgoogle/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p1beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x82\x04\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p1beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p1beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p1beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame"\xf5\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b 
\x03(\x0b\x32=.google.cloud.videointelligence.v1p1beta1.SpeechTranscription\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress"\x92\x02\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p1beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"s\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative"\x8e\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x41\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p1beta1.WordInfo"t\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 
\x01(\t*\x8c\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p1beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p1beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P1Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p1beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p1beta1b\x06proto3' - ), + serialized_options=b"\n,com.google.cloud.videointelligence.v1p1beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P1Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p1beta1\352\002+Google::Cloud::VideoIntelligence::V1p1beta1", + 
serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p1beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x82\x04\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p1beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p1beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p1beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame"\xf5\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b 
\x03(\x0b\x32=.google.cloud.videointelligence.v1p1beta1.SpeechTranscription\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress"\x92\x02\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p1beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"s\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative"\x8e\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x41\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p1beta1.WordInfo"t\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 
\x01(\t*\x8c\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p1beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p1beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P1Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p1beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p1beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -203,7 +196,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -221,7 +214,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -245,7 +238,7 @@ containing_type=None, is_extension=False, extension_scope=None, - 
serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -275,13 +268,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -293,13 +286,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -479,7 +472,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -518,7 +511,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -557,7 +550,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -767,7 +760,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -785,7 +778,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -803,7 +796,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, 
enum_type=None, containing_type=None, @@ -1031,7 +1024,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1235,7 +1228,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1367,13 +1360,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1391,7 +1384,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1409,7 +1402,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1427,7 +1420,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1445,7 +1438,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1463,7 +1456,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -1502,7 +1495,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, 
) ], @@ -1574,7 +1567,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1598,7 +1591,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1685,7 +1678,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1827,10 +1820,10 @@ AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "AnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation request. + { + "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation request. Attributes: @@ -1839,12 +1832,12 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: '\*' to match 0 - or more characters; '?' to match 1 character. If unset, the + identify multiple videos. Supported wildcards: ’*’ to match 0 + or more characters; ‘?’ to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. 
input_content: @@ -1861,8 +1854,8 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. @@ -1871,17 +1864,17 @@ will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoRequest) VideoContext = _reflection.GeneratedProtocolMessageType( "VideoContext", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCONTEXT, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video context and/or feature-specific parameters. + { + "DESCRIPTOR": _VIDEOCONTEXT, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video context and/or feature-specific parameters. Attributes: @@ -1890,92 +1883,92 @@ not required to be contiguous or span the whole video. If unspecified, each video is treated as a single segment. label_detection_config: - Config for LABEL\_DETECTION. + Config for LABEL_DETECTION. shot_change_detection_config: - Config for SHOT\_CHANGE\_DETECTION. + Config for SHOT_CHANGE_DETECTION. explicit_content_detection_config: - Config for EXPLICIT\_CONTENT\_DETECTION. + Config for EXPLICIT_CONTENT_DETECTION. speech_transcription_config: - Config for SPEECH\_TRANSCRIPTION. + Config for SPEECH_TRANSCRIPTION. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.VideoContext) - ), + }, ) _sym_db.RegisterMessage(VideoContext) LabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "LabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_LABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Config for LABEL\_DETECTION. + { + "DESCRIPTOR": _LABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Config for LABEL_DETECTION. Attributes: label_detection_mode: - What labels should be detected with LABEL\_DETECTION, in + What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e. non- moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with ``SHOT_AND_FRAME_MODE`` enabled. model: Model to use for label detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(LabelDetectionConfig) ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "ShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Config for SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Config for SHOT_CHANGE_DETECTION. 
Attributes: model: Model to use for shot change detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "ExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. Attributes: model: Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) VideoSegment = _reflection.GeneratedProtocolMessageType( "VideoSegment", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOSEGMENT, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video segment. + { + "DESCRIPTOR": _VIDEOSEGMENT, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment. Attributes: @@ -1987,17 +1980,17 @@ corresponding to the end of the segment (inclusive). 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.VideoSegment) - ), + }, ) _sym_db.RegisterMessage(VideoSegment) LabelSegment = _reflection.GeneratedProtocolMessageType( "LabelSegment", (_message.Message,), - dict( - DESCRIPTOR=_LABELSEGMENT, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label + { + "DESCRIPTOR": _LABELSEGMENT, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for label detection. @@ -2008,17 +2001,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.LabelSegment) - ), + }, ) _sym_db.RegisterMessage(LabelSegment) LabelFrame = _reflection.GeneratedProtocolMessageType( "LabelFrame", (_message.Message,), - dict( - DESCRIPTOR=_LABELFRAME, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for label detection. + { + "DESCRIPTOR": _LABELFRAME, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for label detection. Attributes: @@ -2029,17 +2022,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.LabelFrame) - ), + }, ) _sym_db.RegisterMessage(LabelFrame) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Detected entity from video analysis. + { + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Detected entity from video analysis. 
Attributes: @@ -2053,17 +2046,17 @@ Language code for ``description`` in BCP-47 format. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) LabelAnnotation = _reflection.GeneratedProtocolMessageType( "LabelAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LABELANNOTATION, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Label annotation. + { + "DESCRIPTOR": _LABELANNOTATION, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Label annotation. Attributes: @@ -2080,17 +2073,17 @@ All video frames where a label was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.LabelAnnotation) - ), + }, ) _sym_db.RegisterMessage(LabelAnnotation) ExplicitContentFrame = _reflection.GeneratedProtocolMessageType( "ExplicitContentFrame", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTFRAME, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for explicit content. + { + "DESCRIPTOR": _EXPLICITCONTENTFRAME, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for explicit content. Attributes: @@ -2101,17 +2094,17 @@ Likelihood of the pornography content.. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentFrame) ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType( "ExplicitContentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTANNOTATION, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual + { + "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. @@ -2121,17 +2114,17 @@ All video frames where explicit content was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentAnnotation) VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Annotation results for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation results for a single video. Attributes: @@ -2159,17 +2152,17 @@ some may fail. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationResults) AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "AnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` + { + "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2180,17 +2173,17 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoResponse) VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType( "VideoAnnotationProgress", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONPROGRESS, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Annotation progress for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation progress for a single video. Attributes: @@ -2206,17 +2199,17 @@ Output only. Time of the most recent update. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationProgress) AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType( "AnnotateVideoProgress", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` + { + "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2227,24 +2220,24 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoProgress) SpeechTranscriptionConfig = _reflection.GeneratedProtocolMessageType( "SpeechTranscriptionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTIONCONFIG, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Config for SPEECH\_TRANSCRIPTION. + { + "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Config for SPEECH_TRANSCRIPTION. Attributes: language_code: Required. *Required* The language of the supplied audio as a `BCP-47 `__ - language tag. Example: "en-US". See `Language Support + language tag. Example: “en-US”. See `Language Support `__ for a list of the currently supported language codes. max_alternatives: @@ -2258,45 +2251,45 @@ filter_profanity: Optional. 
If set to ``true``, the server will attempt to filter out profanities, replacing all but the initial - character in each filtered word with asterisks, e.g. - "f\*\*\*". If set to ``false`` or omitted, profanities won't - be filtered out. + character in each filtered word with asterisks, e.g. "f***". + If set to ``false`` or omitted, profanities won’t be filtered + out. speech_contexts: Optional. A means to provide context to assist the speech recognition. enable_automatic_punctuation: - Optional. If 'true', adds punctuation to recognition result + Optional. If ‘true’, adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no - effect at all. The default 'false' value does not add - punctuation to result hypotheses. NOTE: "This is currently + effect at all. The default ‘false’ value does not add + punctuation to result hypotheses. NOTE: “This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a - premium feature." + premium feature.” audio_tracks: Optional. For file formats, such as MXF or MKV, supporting multiple audio tracks, specify up to two tracks. Default: track 0. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscriptionConfig) SpeechContext = _reflection.GeneratedProtocolMessageType( "SpeechContext", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHCONTEXT, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor + { + "DESCRIPTOR": _SPEECHCONTEXT, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Provides “hints” to the speech recognizer to favor specific words and phrases in the results. Attributes: phrases: Optional. 
A list of strings containing words and phrases - "hints" so that the speech recognition is more likely to + “hints” so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add @@ -2305,17 +2298,17 @@ `__. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.SpeechContext) - ), + }, ) _sym_db.RegisterMessage(SpeechContext) SpeechTranscription = _reflection.GeneratedProtocolMessageType( "SpeechTranscription", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTION, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of + { + "DESCRIPTOR": _SPEECHTRANSCRIPTION, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """A speech recognition result corresponding to a portion of the audio. @@ -2328,17 +2321,17 @@ recognizer. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.SpeechTranscription) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscription) SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType( "SpeechRecognitionAlternative", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHRECOGNITIONALTERNATIVE, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Alternative hypotheses (a.k.a. n-best list). + { + "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Alternative hypotheses (a.k.a. n-best list). Attributes: @@ -2358,17 +2351,17 @@ recognized word. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative) - ), + }, ) _sym_db.RegisterMessage(SpeechRecognitionAlternative) WordInfo = _reflection.GeneratedProtocolMessageType( "WordInfo", (_message.Message,), - dict( - DESCRIPTOR=_WORDINFO, - __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. Word + { + "DESCRIPTOR": _WORDINFO, + "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", + "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. @@ -2391,7 +2384,7 @@ information. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.WordInfo) - ), + }, ) _sym_db.RegisterMessage(WordInfo) @@ -2416,9 +2409,7 @@ full_name="google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=4449, serialized_end=4783, methods=[ @@ -2429,9 +2420,7 @@ containing_service=None, input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002\037"\032/v1p1beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' - ), + serialized_options=b'\202\323\344\223\002\037"\032/v1p1beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', ) ], ) diff --git a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py 
b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py index 56f54b69..a220c555 100644 --- a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py @@ -193,9 +193,9 @@ def __init__( # Service calls def annotate_video( self, + features, input_uri=None, input_content=None, - features=None, video_context=None, output_uri=None, location_id=None, @@ -215,11 +215,11 @@ def annotate_video( >>> >>> client = videointelligence_v1p2beta1.VideoIntelligenceServiceClient() >>> - >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] + >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> - >>> response = client.annotate_video(input_uri=input_uri, features=features) + >>> response = client.annotate_video(features, input_uri=input_uri) >>> >>> def callback(operation_future): ... # Handle result. @@ -231,6 +231,7 @@ def annotate_video( >>> metadata = response.metadata() Args: + features (list[~google.cloud.videointelligence_v1p2beta1.types.Feature]): Required. Requested video annotation features. input_uri (str): Input video location. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` @@ -238,19 +239,18 @@ def annotate_video( more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '\*' to match 0 or more - characters; '?' to match 1 character. If unset, the input video should - be embedded in the request as ``input_content``. If set, - ``input_content`` should be unset. - input_content (bytes): The video data bytes. If unset, the input video(s) should be specified - via ``input_uri``. If set, ``input_uri`` should be unset. 
- features (list[~google.cloud.videointelligence_v1p2beta1.types.Feature]): Required. Requested video annotation features. + multiple videos. Supported wildcards: '*' to match 0 or more characters; + '?' to match 1 character. If unset, the input video should be embedded + in the request as ``input_content``. If set, ``input_content`` should be + unset. + input_content (bytes): The video data bytes. If unset, the input video(s) should be + specified via ``input_uri``. If set, ``input_uri`` should be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1p2beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p2beta1.types.VideoContext` - output_uri (str): Optional. Location where the output (in JSON format) should be stored. - Currently, only `Google Cloud + output_uri (str): Optional. Location where the output (in JSON format) should be + stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For @@ -291,9 +291,9 @@ def annotate_video( ) request = video_intelligence_pb2.AnnotateVideoRequest( + features=features, input_uri=input_uri, input_content=input_content, - features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index b55b39d9..63dc0539 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -31,12 +28,8 @@ name="google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto", package="google.cloud.videointelligence.v1p2beta1", syntax="proto3", - serialized_options=_b( - "\n,com.google.cloud.videointelligence.v1p2beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P2Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p2beta1\352\002+Google::Cloud::VideoIntelligence::V1p2beta1" - ), - serialized_pb=_b( - '\nGgoogle/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p2beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p2beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xf6\x03\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 
\x01(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.TextDetectionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p2beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"-\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 
\x03(\x0b\x32\x34.google.cloud.videointelligence.v1p2beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p2beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcb\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p2beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p2beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p2beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p2beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x88\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame\x12G\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment*\x9b\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p2beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p2beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P2Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p2beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p2beta1b\x06proto3' - ), + serialized_options=b"\n,com.google.cloud.videointelligence.v1p2beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P2Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p2beta1\352\002+Google::Cloud::VideoIntelligence::V1p2beta1", + serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p2beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 
\x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p2beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xf6\x03\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.TextDetectionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p2beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"-\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 
\x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p2beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p2beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcb\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p2beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t 
\x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p2beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p2beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p2beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x88\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame\x12G\n\x07segment\x18\x03 
\x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment*\x9b\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p2beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p2beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P2Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p2beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p2beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -207,7 +200,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -225,7 +218,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, 
containing_type=None, @@ -249,7 +242,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -279,13 +272,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -297,13 +290,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -483,7 +476,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -522,7 +515,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -561,7 +554,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -810,7 +803,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -828,7 +821,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -846,7 +839,7 @@ cpp_type=9, label=1, has_default_value=False, 
- default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1167,7 +1160,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1389,7 +1382,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1749,7 +1742,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2066,10 +2059,10 @@ AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "AnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation request. + { + "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation request. Attributes: @@ -2078,12 +2071,12 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: '\*' to match 0 - or more characters; '?' to match 1 character. If unset, the + identify multiple videos. Supported wildcards: ’*’ to match 0 + or more characters; ‘?’ to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. 
If set, ``input_content`` should be unset. input_content: @@ -2100,8 +2093,8 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. @@ -2110,17 +2103,17 @@ will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoRequest) VideoContext = _reflection.GeneratedProtocolMessageType( "VideoContext", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCONTEXT, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video context and/or feature-specific parameters. + { + "DESCRIPTOR": _VIDEOCONTEXT, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video context and/or feature-specific parameters. Attributes: @@ -2129,92 +2122,92 @@ not required to be contiguous or span the whole video. If unspecified, each video is treated as a single segment. label_detection_config: - Config for LABEL\_DETECTION. + Config for LABEL_DETECTION. shot_change_detection_config: - Config for SHOT\_CHANGE\_DETECTION. + Config for SHOT_CHANGE_DETECTION. explicit_content_detection_config: - Config for EXPLICIT\_CONTENT\_DETECTION. + Config for EXPLICIT_CONTENT_DETECTION. text_detection_config: - Config for TEXT\_DETECTION. + Config for TEXT_DETECTION. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.VideoContext) - ), + }, ) _sym_db.RegisterMessage(VideoContext) LabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "LabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_LABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Config for LABEL\_DETECTION. + { + "DESCRIPTOR": _LABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Config for LABEL_DETECTION. Attributes: label_detection_mode: - What labels should be detected with LABEL\_DETECTION, in + What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e. non- moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with ``SHOT_AND_FRAME_MODE`` enabled. model: Model to use for label detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(LabelDetectionConfig) ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "ShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Config for SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Config for SHOT_CHANGE_DETECTION. 
Attributes: model: Model to use for shot change detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "ExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. Attributes: model: Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) TextDetectionConfig = _reflection.GeneratedProtocolMessageType( "TextDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_TEXTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Config for TEXT\_DETECTION. + { + "DESCRIPTOR": _TEXTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Config for TEXT_DETECTION. Attributes: @@ -2226,17 +2219,17 @@ is provided. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.TextDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(TextDetectionConfig) VideoSegment = _reflection.GeneratedProtocolMessageType( "VideoSegment", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOSEGMENT, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video segment. + { + "DESCRIPTOR": _VIDEOSEGMENT, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment. Attributes: @@ -2248,17 +2241,17 @@ corresponding to the end of the segment (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.VideoSegment) - ), + }, ) _sym_db.RegisterMessage(VideoSegment) LabelSegment = _reflection.GeneratedProtocolMessageType( "LabelSegment", (_message.Message,), - dict( - DESCRIPTOR=_LABELSEGMENT, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label + { + "DESCRIPTOR": _LABELSEGMENT, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for label detection. @@ -2269,17 +2262,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.LabelSegment) - ), + }, ) _sym_db.RegisterMessage(LabelSegment) LabelFrame = _reflection.GeneratedProtocolMessageType( "LabelFrame", (_message.Message,), - dict( - DESCRIPTOR=_LABELFRAME, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for label detection. + { + "DESCRIPTOR": _LABELFRAME, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for label detection. 
Attributes: @@ -2290,17 +2283,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.LabelFrame) - ), + }, ) _sym_db.RegisterMessage(LabelFrame) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Detected entity from video analysis. + { + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Detected entity from video analysis. Attributes: @@ -2314,17 +2307,17 @@ Language code for ``description`` in BCP-47 format. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) LabelAnnotation = _reflection.GeneratedProtocolMessageType( "LabelAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LABELANNOTATION, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Label annotation. + { + "DESCRIPTOR": _LABELANNOTATION, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Label annotation. Attributes: @@ -2341,17 +2334,17 @@ All video frames where a label was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.LabelAnnotation) - ), + }, ) _sym_db.RegisterMessage(LabelAnnotation) ExplicitContentFrame = _reflection.GeneratedProtocolMessageType( "ExplicitContentFrame", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTFRAME, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for explicit content. 
+ { + "DESCRIPTOR": _EXPLICITCONTENTFRAME, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for explicit content. Attributes: @@ -2362,17 +2355,17 @@ Likelihood of the pornography content.. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentFrame) ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType( "ExplicitContentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTANNOTATION, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual + { + "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. @@ -2382,17 +2375,17 @@ All video frames where explicit content was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentAnnotation) NormalizedBoundingBox = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingBox", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. @@ -2407,17 +2400,17 @@ Bottom Y coordinate. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingBox) VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Annotation results for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation results for a single video. Attributes: @@ -2449,17 +2442,17 @@ fail. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationResults) AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "AnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` + { + "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2470,17 +2463,17 @@ ``AnnotateVideoRequest``. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoResponse) VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType( "VideoAnnotationProgress", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONPROGRESS, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Annotation progress for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation progress for a single video. Attributes: @@ -2496,17 +2489,17 @@ Time of the most recent update. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationProgress) AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType( "AnnotateVideoProgress", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` + { + "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2517,17 +2510,17 @@ ``AnnotateVideoRequest``. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoProgress) NormalizedVertex = _reflection.GeneratedProtocolMessageType( "NormalizedVertex", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDVERTEX, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""X coordinate. + { + "DESCRIPTOR": _NORMALIZEDVERTEX, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """X coordinate. Attributes: @@ -2535,24 +2528,24 @@ Y coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.NormalizedVertex) - ), + }, ) _sym_db.RegisterMessage(NormalizedVertex) NormalizedBoundingPoly = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingPoly", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0----1 \| - \| 3----2 + bounding box: When the text is horizontal it might look like: 0—-1 \| \| + 3—-2 - When it's clockwise rotated 180 degrees around the top-left corner it - becomes: 2----3 \| \| 1----0 + When it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). 
Note that values can be less than 0, or greater than 1 due to trignometric calculations for @@ -2564,17 +2557,17 @@ Normalized vertices of the bounding polygon. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingPoly) TextSegment = _reflection.GeneratedProtocolMessageType( "TextSegment", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSEGMENT, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for text detection. + { + "DESCRIPTOR": _TEXTSEGMENT, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for text detection. Attributes: @@ -2588,17 +2581,17 @@ appears. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.TextSegment) - ), + }, ) _sym_db.RegisterMessage(TextSegment) TextFrame = _reflection.GeneratedProtocolMessageType( "TextFrame", (_message.Message,), - dict( - DESCRIPTOR=_TEXTFRAME, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation + { + "DESCRIPTOR": _TEXTFRAME, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. @@ -2610,17 +2603,17 @@ Timestamp of this frame. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.TextFrame) - ), + }, ) _sym_db.RegisterMessage(TextFrame) TextAnnotation = _reflection.GeneratedProtocolMessageType( "TextAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTANNOTATION, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Annotations related to one detected OCR text snippet. This + { + "DESCRIPTOR": _TEXTANNOTATION, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. @@ -2632,17 +2625,17 @@ All video segments where OCR detected text appears. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.TextAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextAnnotation) ObjectTrackingFrame = _reflection.GeneratedProtocolMessageType( "ObjectTrackingFrame", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGFRAME, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and + { + "DESCRIPTOR": _OBJECTTRACKINGFRAME, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. @@ -2655,17 +2648,17 @@ The timestamp of the frame in microseconds. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingFrame) ObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( "ObjectTrackingAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGANNOTATION, - __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Annotations corresponding to one tracked object. + { + "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, + "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", + "__doc__": """Annotations corresponding to one tracked object. Attributes: @@ -2673,7 +2666,7 @@ Entity to specify the object category that this track is labeled as. confidence: - Object category's labeling confidence of this track. + Object category’s labeling confidence of this track. frames: Information corresponding to all frames where this object track appears. @@ -2682,7 +2675,7 @@ appears. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingAnnotation) @@ -2697,9 +2690,7 @@ full_name="google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=4996, serialized_end=5330, methods=[ @@ -2710,9 +2701,7 @@ containing_service=None, input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002\037"\032/v1p2beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' - ), + serialized_options=b'\202\323\344\223\002\037"\032/v1p2beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', ) ], ) diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py index 53bb9872..2cf8bb06 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py @@ -18,7 +18,7 @@ }, "methods": { "StreamingAnnotateVideo": { - "timeout_millis": 10800000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", } diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py index f9c553f4..e163ecac 100644 --- 
a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py @@ -193,9 +193,9 @@ def __init__( # Service calls def annotate_video( self, + features, input_uri=None, input_content=None, - features=None, video_context=None, output_uri=None, location_id=None, @@ -215,11 +215,11 @@ def annotate_video( >>> >>> client = videointelligence_v1p3beta1.VideoIntelligenceServiceClient() >>> - >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] + >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> - >>> response = client.annotate_video(input_uri=input_uri, features=features) + >>> response = client.annotate_video(features, input_uri=input_uri) >>> >>> def callback(operation_future): ... # Handle result. @@ -231,6 +231,7 @@ def annotate_video( >>> metadata = response.metadata() Args: + features (list[~google.cloud.videointelligence_v1p3beta1.types.Feature]): Required. Requested video annotation features. input_uri (str): Input video location. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` @@ -238,19 +239,18 @@ def annotate_video( more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '\*' to match 0 or more - characters; '?' to match 1 character. If unset, the input video should - be embedded in the request as ``input_content``. If set, - ``input_content`` should be unset. - input_content (bytes): The video data bytes. If unset, the input video(s) should be specified - via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1p3beta1.types.Feature]): Required. Requested video annotation features. + multiple videos. 
Supported wildcards: '*' to match 0 or more characters; + '?' to match 1 character. If unset, the input video should be embedded + in the request as ``input_content``. If set, ``input_content`` should be + unset. + input_content (bytes): The video data bytes. If unset, the input video(s) should be + specified via ``input_uri``. If set, ``input_uri`` should be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1p3beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p3beta1.types.VideoContext` - output_uri (str): Optional. Location where the output (in JSON format) should be stored. - Currently, only `Google Cloud + output_uri (str): Optional. Location where the output (in JSON format) should be + stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For @@ -291,9 +291,9 @@ def annotate_video( ) request = video_intelligence_pb2.AnnotateVideoRequest( + features=features, input_uri=input_uri, input_content=input_content, - features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index dd9b41b9..8f67f8d3 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -31,12 +28,8 @@ name="google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto", package="google.cloud.videointelligence.v1p3beta1", syntax="proto3", - serialized_options=_b( - "\n,com.google.cloud.videointelligence.v1p3beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P3Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p3beta1" - ), - serialized_pb=_b( - '\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 
\x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 
\x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 
\x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 
\x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x42\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t")\n\'StreamingExplicitContentDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08"\x1f\n\x1dStreamingObjectTrackingConfig"$\n"StreamingShotChangeDetectionConfig"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"\x8c\x07\n\x14StreamingVideoConfig\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 
\x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\x8d\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n STREAMING_AUTOML_OBJECT_TRACKING\x10\x16\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3' - ), + 
serialized_options=b"\n,com.google.cloud.videointelligence.v1p3beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P3Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p3beta1", + serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 
\x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 
\x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 
\x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 
\x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 
\x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 
\x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x42\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 
\x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t")\n\'StreamingExplicitContentDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08"\x1f\n\x1dStreamingObjectTrackingConfig"$\n"StreamingShotChangeDetectionConfig"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"\x8c\x07\n\x14StreamingVideoConfig\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 
\x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\x8d\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n STREAMING_AUTOML_OBJECT_TRACKING\x10\x16\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 
videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -314,7 +307,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -332,7 +325,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -356,7 +349,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -386,13 +379,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -404,13 +397,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -662,7 +655,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -737,7 +730,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -776,7 +769,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -815,7 +808,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -854,7 +847,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1022,7 +1015,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1232,7 +1225,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1250,7 +1243,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1268,7 +1261,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1631,7 +1624,7 @@ containing_type=None, 
is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1649,7 +1642,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -1724,7 +1717,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1742,7 +1735,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -1775,7 +1768,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1811,7 +1804,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1850,7 +1843,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1868,7 +1861,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1886,7 +1879,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2077,7 +2070,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2170,7 +2163,7 @@ cpp_type=9, label=1, 
has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -2248,7 +2241,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2614,7 +2607,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2782,13 +2775,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2806,7 +2799,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2824,7 +2817,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2842,7 +2835,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2860,7 +2853,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2878,7 +2871,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2896,7 +2889,7 @@ containing_type=None, 
is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2914,7 +2907,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -2932,7 +2925,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ), ], @@ -2971,7 +2964,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\001"), + serialized_options=b"\340A\001", file=DESCRIPTOR, ) ], @@ -3022,13 +3015,13 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -3061,7 +3054,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3085,7 +3078,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3103,7 +3096,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -3172,7 +3165,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3196,7 +3189,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + 
serialized_options=b"\340A\003", file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -3214,7 +3207,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\003"), + serialized_options=b"\340A\003", file=DESCRIPTOR, ), ], @@ -3475,7 +3468,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3801,7 +3794,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -3884,7 +3877,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3923,7 +3916,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -3962,7 +3955,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -4118,7 +4111,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -4771,10 +4764,10 @@ AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "AnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation request. + { + "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation request. 
Attributes: @@ -4783,12 +4776,12 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: '\*' to match 0 - or more characters; '?' to match 1 character. If unset, the + identify multiple videos. Supported wildcards: ’*’ to match 0 + or more characters; ‘?’ to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content: @@ -4805,8 +4798,8 @@ `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID\_ARGUMENT][google.rpc.Code.INVALID\_ARGUMENT - ]). For more information, see `Request URIs + .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) + . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. @@ -4815,17 +4808,17 @@ will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoRequest) VideoContext = _reflection.GeneratedProtocolMessageType( "VideoContext", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCONTEXT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video context and/or feature-specific parameters. + { + "DESCRIPTOR": _VIDEOCONTEXT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video context and/or feature-specific parameters. 
Attributes: @@ -4834,49 +4827,49 @@ not required to be contiguous or span the whole video. If unspecified, each video is treated as a single segment. label_detection_config: - Config for LABEL\_DETECTION. + Config for LABEL_DETECTION. shot_change_detection_config: - Config for SHOT\_CHANGE\_DETECTION. + Config for SHOT_CHANGE_DETECTION. explicit_content_detection_config: - Config for EXPLICIT\_CONTENT\_DETECTION. + Config for EXPLICIT_CONTENT_DETECTION. face_detection_config: - Config for FACE\_DETECTION. + Config for FACE_DETECTION. speech_transcription_config: - Config for SPEECH\_TRANSCRIPTION. + Config for SPEECH_TRANSCRIPTION. text_detection_config: - Config for TEXT\_DETECTION. + Config for TEXT_DETECTION. person_detection_config: - Config for PERSON\_DETECTION. + Config for PERSON_DETECTION. object_tracking_config: - Config for OBJECT\_TRACKING. + Config for OBJECT_TRACKING. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.VideoContext) - ), + }, ) _sym_db.RegisterMessage(VideoContext) LabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "LabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_LABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for LABEL\_DETECTION. + { + "DESCRIPTOR": _LABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for LABEL_DETECTION. Attributes: label_detection_mode: - What labels should be detected with LABEL\_DETECTION, in + What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e. non- moving) camera. When set to true, might improve detection accuracy for moving objects. 
Should be used with ``SHOT_AND_FRAME_MODE`` enabled. model: Model to use for label detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. frame_confidence_threshold: The confidence threshold we perform filtering on the labels from frame-level detection. If not set, it is set to 0.4 by @@ -4895,100 +4888,100 @@ we release a new model. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(LabelDetectionConfig) ShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "ShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for SHOT_CHANGE_DETECTION. Attributes: model: Model to use for shot change detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) ObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( "ObjectTrackingConfig", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for OBJECT\_TRACKING. + { + "DESCRIPTOR": _OBJECTTRACKINGCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for OBJECT_TRACKING. Attributes: model: Model to use for object tracking. 
Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingConfig) ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "ExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. Attributes: model: Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) FaceDetectionConfig = _reflection.GeneratedProtocolMessageType( "FaceDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_FACEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for FACE\_DETECTION. + { + "DESCRIPTOR": _FACEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for FACE_DETECTION. Attributes: model: Model to use for face detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. include_bounding_boxes: Whether bounding boxes be included in the face annotation output. 
include_attributes: Whether to enable face attributes detection, such as glasses, - dark\_glasses, mouth\_open etc. Ignored if - 'include\_bounding\_boxes' is false. + dark_glasses, mouth_open etc. Ignored if + ‘include_bounding_boxes’ is false. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(FaceDetectionConfig) PersonDetectionConfig = _reflection.GeneratedProtocolMessageType( "PersonDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_PERSONDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for PERSON\_DETECTION. + { + "DESCRIPTOR": _PERSONDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for PERSON_DETECTION. Attributes: @@ -4997,26 +4990,26 @@ annotation output. include_pose_landmarks: Whether to enable pose landmarks detection. Ignored if - 'include\_bounding\_boxes' is false. + ‘include_bounding_boxes’ is false. include_attributes: Whether to enable person attributes detection, such as cloth color (black, blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair color (black, blonde, etc), hair length (long, short, bald), etc. Ignored if - 'include\_bounding\_boxes' is false. + ‘include_bounding_boxes’ is false. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(PersonDetectionConfig) TextDetectionConfig = _reflection.GeneratedProtocolMessageType( "TextDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_TEXTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for TEXT\_DETECTION. 
+ { + "DESCRIPTOR": _TEXTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for TEXT_DETECTION. Attributes: @@ -5028,20 +5021,20 @@ is provided. model: Model to use for text detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". + “builtin/stable” (the default if unset) and “builtin/latest”. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TextDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(TextDetectionConfig) VideoSegment = _reflection.GeneratedProtocolMessageType( "VideoSegment", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOSEGMENT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video segment. + { + "DESCRIPTOR": _VIDEOSEGMENT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment. Attributes: @@ -5053,17 +5046,17 @@ corresponding to the end of the segment (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.VideoSegment) - ), + }, ) _sym_db.RegisterMessage(VideoSegment) LabelSegment = _reflection.GeneratedProtocolMessageType( "LabelSegment", (_message.Message,), - dict( - DESCRIPTOR=_LABELSEGMENT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label + { + "DESCRIPTOR": _LABELSEGMENT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for label detection. @@ -5074,17 +5067,17 @@ Confidence that the label is accurate. Range: [0, 1]. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LabelSegment) - ), + }, ) _sym_db.RegisterMessage(LabelSegment) LabelFrame = _reflection.GeneratedProtocolMessageType( "LabelFrame", (_message.Message,), - dict( - DESCRIPTOR=_LABELFRAME, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for label detection. + { + "DESCRIPTOR": _LABELFRAME, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for label detection. Attributes: @@ -5095,17 +5088,17 @@ Confidence that the label is accurate. Range: [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LabelFrame) - ), + }, ) _sym_db.RegisterMessage(LabelFrame) Entity = _reflection.GeneratedProtocolMessageType( "Entity", (_message.Message,), - dict( - DESCRIPTOR=_ENTITY, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Detected entity from video analysis. + { + "DESCRIPTOR": _ENTITY, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Detected entity from video analysis. Attributes: @@ -5119,17 +5112,17 @@ Language code for ``description`` in BCP-47 format. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.Entity) - ), + }, ) _sym_db.RegisterMessage(Entity) LabelAnnotation = _reflection.GeneratedProtocolMessageType( "LabelAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LABELANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Label annotation. + { + "DESCRIPTOR": _LABELANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Label annotation. 
Attributes: @@ -5146,17 +5139,17 @@ All video frames where a label was detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LabelAnnotation) - ), + }, ) _sym_db.RegisterMessage(LabelAnnotation) ExplicitContentFrame = _reflection.GeneratedProtocolMessageType( "ExplicitContentFrame", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTFRAME, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for explicit content. + { + "DESCRIPTOR": _EXPLICITCONTENTFRAME, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for explicit content. Attributes: @@ -5167,17 +5160,17 @@ Likelihood of the pornography content.. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentFrame) ExplicitContentAnnotation = _reflection.GeneratedProtocolMessageType( "ExplicitContentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual + { + "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. @@ -5187,17 +5180,17 @@ All video frames where explicit content was detected. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation) - ), + }, ) _sym_db.RegisterMessage(ExplicitContentAnnotation) NormalizedBoundingBox = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingBox", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. @@ -5212,18 +5205,18 @@ Bottom Y coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingBox) TimestampedObject = _reflection.GeneratedProtocolMessageType( "TimestampedObject", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPEDOBJECT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""For tracking related features. An object at time\_offset - with attributes, and located with normalized\_bounding\_box. + { + "DESCRIPTOR": _TIMESTAMPEDOBJECT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """For tracking related features. An object at time_offset + with attributes, and located with normalized_bounding_box. Attributes: @@ -5239,17 +5232,17 @@ Optional. The detected landmarks. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TimestampedObject) - ), + }, ) _sym_db.RegisterMessage(TimestampedObject) Track = _reflection.GeneratedProtocolMessageType( "Track", (_message.Message,), - dict( - DESCRIPTOR=_TRACK, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A track of an object instance. + { + "DESCRIPTOR": _TRACK, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """A track of an object instance. Attributes: @@ -5264,43 +5257,43 @@ Optional. The confidence score of the tracked object. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.Track) - ), + }, ) _sym_db.RegisterMessage(Track) DetectedAttribute = _reflection.GeneratedProtocolMessageType( "DetectedAttribute", (_message.Message,), - dict( - DESCRIPTOR=_DETECTEDATTRIBUTE, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A generic detected attribute represented by name in string + { + "DESCRIPTOR": _DETECTEDATTRIBUTE, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """A generic detected attribute represented by name in string format. Attributes: name: - The name of the attribute, i.e. glasses, dark\_glasses, - mouth\_open etc. A full list of supported type names will be + The name of the attribute, i.e. glasses, dark_glasses, + mouth_open etc. A full list of supported type names will be provided in the document. confidence: Detected attribute confidence. Range [0, 1]. value: Text value of the detection result. For example, the value for - "HairColor" can be "black", "blonde", etc. + “HairColor” can be “black”, “blonde”, etc. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.DetectedAttribute) - ), + }, ) _sym_db.RegisterMessage(DetectedAttribute) Celebrity = _reflection.GeneratedProtocolMessageType( "Celebrity", (_message.Message,), - dict( - DESCRIPTOR=_CELEBRITY, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Celebrity definition. + { + "DESCRIPTOR": _CELEBRITY, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Celebrity definition. Attributes: @@ -5316,21 +5309,21 @@ celebrity, if applicable. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.Celebrity) - ), + }, ) _sym_db.RegisterMessage(Celebrity) CelebrityTrack = _reflection.GeneratedProtocolMessageType( "CelebrityTrack", (_message.Message,), - dict( - RecognizedCelebrity=_reflection.GeneratedProtocolMessageType( + { + "RecognizedCelebrity": _reflection.GeneratedProtocolMessageType( "RecognizedCelebrity", (_message.Message,), - dict( - DESCRIPTOR=_CELEBRITYTRACK_RECOGNIZEDCELEBRITY, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""The recognized celebrity with confidence score. + { + "DESCRIPTOR": _CELEBRITYTRACK_RECOGNIZEDCELEBRITY, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """The recognized celebrity with confidence score. Attributes: @@ -5340,11 +5333,11 @@ Recognition confidence. Range [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity) - ), + }, ), - DESCRIPTOR=_CELEBRITYTRACK, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""The annotation result of a celebrity face track. 
+ "DESCRIPTOR": _CELEBRITYTRACK, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """The annotation result of a celebrity face track. RecognizedCelebrity field could be empty if the face track does not have any matched celebrities. @@ -5353,10 +5346,10 @@ celebrities: Top N match of the celebrities for the face in this track. face_track: - A track of a person's face. + A track of a person’s face. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.CelebrityTrack) - ), + }, ) _sym_db.RegisterMessage(CelebrityTrack) _sym_db.RegisterMessage(CelebrityTrack.RecognizedCelebrity) @@ -5364,10 +5357,10 @@ CelebrityRecognitionAnnotation = _reflection.GeneratedProtocolMessageType( "CelebrityRecognitionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_CELEBRITYRECOGNITIONANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Celebrity recognition annotation per video. + { + "DESCRIPTOR": _CELEBRITYRECOGNITIONANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Celebrity recognition annotation per video. Attributes: @@ -5376,23 +5369,23 @@ celebrities and other detected faces in the video. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation) - ), + }, ) _sym_db.RegisterMessage(CelebrityRecognitionAnnotation) DetectedLandmark = _reflection.GeneratedProtocolMessageType( "DetectedLandmark", (_message.Message,), - dict( - DESCRIPTOR=_DETECTEDLANDMARK, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A generic detected landmark represented by name in string + { + "DESCRIPTOR": _DETECTEDLANDMARK, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """A generic detected landmark represented by name in string format and a 2D location. Attributes: name: - The name of this landmark, i.e. left\_hand, right\_shoulder. + The name of this landmark, i.e. left_hand, right_shoulder. point: The 2D point of the detected landmark using the normalized image coordindate system. The normalized coordinates have the @@ -5401,37 +5394,37 @@ The confidence score of the detected landmark. Range [0, 1]. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.DetectedLandmark) - ), + }, ) _sym_db.RegisterMessage(DetectedLandmark) FaceDetectionAnnotation = _reflection.GeneratedProtocolMessageType( "FaceDetectionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_FACEDETECTIONANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Face detection annotation. + { + "DESCRIPTOR": _FACEDETECTIONANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Face detection annotation. Attributes: tracks: The face tracks with attributes. thumbnail: - The thumbnail of a person's face. + The thumbnail of a person’s face. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation) - ), + }, ) _sym_db.RegisterMessage(FaceDetectionAnnotation) PersonDetectionAnnotation = _reflection.GeneratedProtocolMessageType( "PersonDetectionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_PERSONDETECTIONANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Person detection annotation per video. + { + "DESCRIPTOR": _PERSONDETECTIONANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Person detection annotation per video. Attributes: @@ -5439,17 +5432,17 @@ The trackes that a person is detected. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation) - ), + }, ) _sym_db.RegisterMessage(PersonDetectionAnnotation) VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotation results for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation results for a single video. Attributes: @@ -5469,7 +5462,7 @@ ``segment_label_annotations``, this field presents more fine- grained, segment-level labels detected in video content and is made available only when the client sets - ``LabelDetectionConfig.model`` to "builtin/latest" in the + ``LabelDetectionConfig.model`` to “builtin/latest” in the request. shot_label_annotations: Topical label annotations on shot level. 
There is exactly one @@ -5480,7 +5473,7 @@ topical ``shot_label_annotations``, this field presents more fine-grained, shot-level labels detected in video content and is made available only when the client sets - ``LabelDetectionConfig.model`` to "builtin/latest" in the + ``LabelDetectionConfig.model`` to “builtin/latest” in the request. frame_label_annotations: Label annotations on frame level. There is exactly one element @@ -5512,17 +5505,17 @@ fail. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationResults) AnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "AnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` + { + "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -5533,17 +5526,17 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoResponse) VideoAnnotationProgress = _reflection.GeneratedProtocolMessageType( "VideoAnnotationProgress", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOANNOTATIONPROGRESS, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotation progress for a single video. + { + "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation progress for a single video. 
Attributes: @@ -5565,17 +5558,17 @@ contains more than one segments. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress) - ), + }, ) _sym_db.RegisterMessage(VideoAnnotationProgress) AnnotateVideoProgress = _reflection.GeneratedProtocolMessageType( "AnnotateVideoProgress", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` + { + "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -5586,24 +5579,24 @@ ``AnnotateVideoRequest``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress) - ), + }, ) _sym_db.RegisterMessage(AnnotateVideoProgress) SpeechTranscriptionConfig = _reflection.GeneratedProtocolMessageType( "SpeechTranscriptionConfig", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for SPEECH\_TRANSCRIPTION. + { + "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for SPEECH_TRANSCRIPTION. Attributes: language_code: Required. *Required* The language of the supplied audio as a `BCP-47 `__ - language tag. Example: "en-US". See `Language Support + language tag. Example: “en-US”. See `Language Support `__ for a list of the currently supported language codes. max_alternatives: @@ -5617,29 +5610,29 @@ filter_profanity: Optional. 
If set to ``true``, the server will attempt to filter out profanities, replacing all but the initial - character in each filtered word with asterisks, e.g. - "f\*\*\*". If set to ``false`` or omitted, profanities won't - be filtered out. + character in each filtered word with asterisks, e.g. "f***". + If set to ``false`` or omitted, profanities won’t be filtered + out. speech_contexts: Optional. A means to provide context to assist the speech recognition. enable_automatic_punctuation: - Optional. If 'true', adds punctuation to recognition result + Optional. If ‘true’, adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no - effect at all. The default 'false' value does not add - punctuation to result hypotheses. NOTE: "This is currently + effect at all. The default ‘false’ value does not add + punctuation to result hypotheses. NOTE: “This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a - premium feature." + premium feature.” audio_tracks: Optional. For file formats, such as MXF or MKV, supporting multiple audio tracks, specify up to two tracks. Default: track 0. enable_speaker_diarization: - Optional. If 'true', enables speaker detection for each + Optional. If ‘true’, enables speaker detection for each recognized word in the top alternative of the recognition - result using a speaker\_tag provided in the WordInfo. Note: + result using a speaker_tag provided in the WordInfo. Note: When this is true, we send all the words from the beginning of the audio for the top alternative in every consecutive responses. This is done in order to improve our speaker tags @@ -5647,8 +5640,8 @@ conversation over time. diarization_speaker_count: Optional. If set, specifies the estimated number of speakers - in the conversation. If not set, defaults to '2'. 
Ignored - unless enable\_speaker\_diarization is set to true. + in the conversation. If not set, defaults to ‘2’. Ignored + unless enable_speaker_diarization is set to true. enable_word_confidence: Optional. If ``true``, the top result includes a list of words and the confidence for those words. If ``false``, no word- @@ -5656,24 +5649,24 @@ ``false``. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscriptionConfig) SpeechContext = _reflection.GeneratedProtocolMessageType( "SpeechContext", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHCONTEXT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor + { + "DESCRIPTOR": _SPEECHCONTEXT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Provides “hints” to the speech recognizer to favor specific words and phrases in the results. Attributes: phrases: Optional. A list of strings containing words and phrases - "hints" so that the speech recognition is more likely to + “hints” so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add @@ -5682,17 +5675,17 @@ `__. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.SpeechContext) - ), + }, ) _sym_db.RegisterMessage(SpeechContext) SpeechTranscription = _reflection.GeneratedProtocolMessageType( "SpeechTranscription", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHTRANSCRIPTION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of + { + "DESCRIPTOR": _SPEECHTRANSCRIPTION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """A speech recognition result corresponding to a portion of the audio. @@ -5710,17 +5703,17 @@ most likelihood of being spoken in the audio. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.SpeechTranscription) - ), + }, ) _sym_db.RegisterMessage(SpeechTranscription) SpeechRecognitionAlternative = _reflection.GeneratedProtocolMessageType( "SpeechRecognitionAlternative", (_message.Message,), - dict( - DESCRIPTOR=_SPEECHRECOGNITIONALTERNATIVE, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Alternative hypotheses (a.k.a. n-best list). + { + "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Alternative hypotheses (a.k.a. n-best list). Attributes: @@ -5741,17 +5734,17 @@ audio. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative) - ), + }, ) _sym_db.RegisterMessage(SpeechRecognitionAlternative) WordInfo = _reflection.GeneratedProtocolMessageType( "WordInfo", (_message.Message,), - dict( - DESCRIPTOR=_WORDINFO, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. 
Word + { + "DESCRIPTOR": _WORDINFO, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. @@ -5783,21 +5776,21 @@ Output only. A distinct integer value is assigned for every speaker within the audio. This field specifies which one of those speakers was detected to have spoken this word. Value - ranges from 1 up to diarization\_speaker\_count, and is only - set if speaker diarization is enabled. + ranges from 1 up to diarization_speaker_count, and is only set + if speaker diarization is enabled. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.WordInfo) - ), + }, ) _sym_db.RegisterMessage(WordInfo) NormalizedVertex = _reflection.GeneratedProtocolMessageType( "NormalizedVertex", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDVERTEX, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""X coordinate. + { + "DESCRIPTOR": _NORMALIZEDVERTEX, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """X coordinate. Attributes: @@ -5805,24 +5798,24 @@ Y coordinate. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.NormalizedVertex) - ), + }, ) _sym_db.RegisterMessage(NormalizedVertex) NormalizedBoundingPoly = _reflection.GeneratedProtocolMessageType( "NormalizedBoundingPoly", (_message.Message,), - dict( - DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be + { + "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0----1 \| - \| 3----2 + bounding box: When the text is horizontal it might look like: 0—-1 \| \| + 3—-2 - When it's clockwise rotated 180 degrees around the top-left corner it - becomes: 2----3 \| \| 1----0 + When it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trignometric calculations for @@ -5834,17 +5827,17 @@ Normalized vertices of the bounding polygon. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly) - ), + }, ) _sym_db.RegisterMessage(NormalizedBoundingPoly) TextSegment = _reflection.GeneratedProtocolMessageType( "TextSegment", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSEGMENT, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for text detection. 
+ { + "DESCRIPTOR": _TEXTSEGMENT, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video segment level annotation results for text detection. Attributes: @@ -5858,17 +5851,17 @@ appears. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TextSegment) - ), + }, ) _sym_db.RegisterMessage(TextSegment) TextFrame = _reflection.GeneratedProtocolMessageType( "TextFrame", (_message.Message,), - dict( - DESCRIPTOR=_TEXTFRAME, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation + { + "DESCRIPTOR": _TEXTFRAME, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. @@ -5880,17 +5873,17 @@ Timestamp of this frame. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TextFrame) - ), + }, ) _sym_db.RegisterMessage(TextFrame) TextAnnotation = _reflection.GeneratedProtocolMessageType( "TextAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotations related to one detected OCR text snippet. This + { + "DESCRIPTOR": _TEXTANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. @@ -5902,17 +5895,17 @@ All video segments where OCR detected text appears. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TextAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextAnnotation) ObjectTrackingFrame = _reflection.GeneratedProtocolMessageType( "ObjectTrackingFrame", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGFRAME, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and + { + "DESCRIPTOR": _OBJECTTRACKINGFRAME, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. @@ -5925,17 +5918,17 @@ The timestamp of the frame in microseconds. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingFrame) ObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( "ObjectTrackingAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_OBJECTTRACKINGANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotations corresponding to one tracked object. + { + "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Annotations corresponding to one tracked object. Attributes: @@ -5943,7 +5936,7 @@ Entity to specify the object category that this track is labeled as. confidence: - Object category's labeling confidence of this track. + Object category’s labeling confidence of this track. frames: Information corresponding to all frames where this object track appears. Non-streaming batch mode: it may be one or @@ -5960,22 +5953,22 @@ Streaming mode ONLY. In streaming mode, we do not know the end time of a tracked object before it is completed. 
Hence, there is no VideoSegment info returned. Instead, we provide a unique - identifiable integer track\_id so that the customers can + identifiable integer track_id so that the customers can correlate the results of the ongoing ObjectTrackAnnotation of - the same track\_id over time. + the same track_id over time. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation) - ), + }, ) _sym_db.RegisterMessage(ObjectTrackingAnnotation) LogoRecognitionAnnotation = _reflection.GeneratedProtocolMessageType( "LogoRecognitionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_LOGORECOGNITIONANNOTATION, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotation corresponding to one detected, tracked and + { + "DESCRIPTOR": _LOGORECOGNITIONANNOTATION, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Annotation corresponding to one detected, tracked and recognized logo class. @@ -5994,17 +5987,17 @@ in one VideoSegment. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation) - ), + }, ) _sym_db.RegisterMessage(LogoRecognitionAnnotation) StreamingAnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( "StreamingAnnotateVideoRequest", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGANNOTATEVIDEOREQUEST, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""The top-level message sent by the client for the + { + "DESCRIPTOR": _STREAMINGANNOTATEVIDEOREQUEST, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """The top-level message sent by the client for the ``StreamingAnnotateVideo`` method. Multiple ``StreamingAnnotateVideoRequest`` messages are sent. The first message must only contain a ``StreamingVideoConfig`` message. 
All subsequent @@ -6030,17 +6023,17 @@ protobuffers use a pure binary representation (not base64). """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest) - ), + }, ) _sym_db.RegisterMessage(StreamingAnnotateVideoRequest) StreamingAnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "StreamingAnnotateVideoResponse", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGANNOTATEVIDEORESPONSE, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""\ ``StreamingAnnotateVideoResponse`` is the only message + { + "DESCRIPTOR": _STREAMINGANNOTATEVIDEORESPONSE, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """\ ``StreamingAnnotateVideoResponse`` is the only message returned to the client by ``StreamingAnnotateVideo``. A series of zero or more ``StreamingAnnotateVideoResponse`` messages are streamed back to the client. @@ -6056,20 +6049,20 @@ GCS URI that stores annotation results of one streaming session. It is a directory that can hold multiple files in JSON format. Example uri format: - gs://bucket\_id/object\_id/cloud\_project\_name-session\_id + gs://bucket_id/object_id/cloud_project_name-session_id """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) - ), + }, ) _sym_db.RegisterMessage(StreamingAnnotateVideoResponse) StreamingAutomlClassificationConfig = _reflection.GeneratedProtocolMessageType( "StreamingAutomlClassificationConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGAUTOMLCLASSIFICATIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_AUTOML\_CLASSIFICATION. 
+ { + "DESCRIPTOR": _STREAMINGAUTOMLCLASSIFICATIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION. Attributes: @@ -6078,17 +6071,17 @@ /locations/{location_id}/models/{model_id}`` """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingAutomlClassificationConfig) StreamingAutomlObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( "StreamingAutomlObjectTrackingConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGAUTOMLOBJECTTRACKINGCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_AUTOML\_OBJECT\_TRACKING. + { + "DESCRIPTOR": _STREAMINGAUTOMLOBJECTTRACKINGCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING. Attributes: @@ -6097,79 +6090,79 @@ /locations/{location_id}/models/{model_id}`` """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingAutomlObjectTrackingConfig) StreamingExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( "StreamingExplicitContentDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGEXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_EXPLICIT\_CONTENT\_DETECTION. + { + "DESCRIPTOR": _STREAMINGEXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_EXPLICIT_CONTENT_DETECTION. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingExplicitContentDetectionConfig) StreamingLabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "StreamingLabelDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGLABELDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_LABEL\_DETECTION. + { + "DESCRIPTOR": _STREAMINGLABELDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_LABEL_DETECTION. Attributes: stationary_camera: - Whether the video has been captured from a stationary (i.e. - non-moving) camera. When set to true, might improve detection - accuracy for moving objects. Default: false. + Whether the video has been captured from a stationary + (i.e. non-moving) camera. When set to true, might improve + detection accuracy for moving objects. Default: false. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingLabelDetectionConfig) StreamingObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( "StreamingObjectTrackingConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGOBJECTTRACKINGCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_OBJECT\_TRACKING. + { + "DESCRIPTOR": _STREAMINGOBJECTTRACKINGCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_OBJECT_TRACKING. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingObjectTrackingConfig) StreamingShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( "StreamingShotChangeDetectionConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGSHOTCHANGEDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for STREAMING\_SHOT\_CHANGE\_DETECTION. + { + "DESCRIPTOR": _STREAMINGSHOTCHANGEDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_SHOT_CHANGE_DETECTION. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingShotChangeDetectionConfig) StreamingStorageConfig = _reflection.GeneratedProtocolMessageType( "StreamingStorageConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGSTORAGECONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Config for streaming storage option. + { + "DESCRIPTOR": _STREAMINGSTORAGECONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for streaming storage option. Attributes: @@ -6179,8 +6172,8 @@ GCS URI to store all annotation results for one client. Client should specify this field as the top-level storage directory. Annotation results of different sessions will be put into - different sub-directories denoted by project\_name and - session\_id. All sub-directories will be auto generated by + different sub-directories denoted by project_name and + session_id. All sub-directories will be auto generated by program and will be made accessible to client in response proto. 
URIs must be specified in the following format: ``gs://bucket-id/object-id`` ``bucket-id`` should be a valid @@ -6190,17 +6183,17 @@ and cause GCS write failure. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingStorageConfig) StreamingVideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "StreamingVideoAnnotationResults", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGVIDEOANNOTATIONRESULTS, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Streaming annotation results corresponding to a portion of + { + "DESCRIPTOR": _STREAMINGVIDEOANNOTATIONRESULTS, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Streaming annotation results corresponding to a portion of the video that is currently being processed. @@ -6216,17 +6209,17 @@ Object tracking results. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults) - ), + }, ) _sym_db.RegisterMessage(StreamingVideoAnnotationResults) StreamingVideoConfig = _reflection.GeneratedProtocolMessageType( "StreamingVideoConfig", (_message.Message,), - dict( - DESCRIPTOR=_STREAMINGVIDEOCONFIG, - __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Provides information to the annotator that specifies how + { + "DESCRIPTOR": _STREAMINGVIDEOCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Provides information to the annotator that specifies how to process the request. @@ -6236,22 +6229,22 @@ streaming_config: Config for requested annotation feature. shot_change_detection_config: - Config for STREAMING\_SHOT\_CHANGE\_DETECTION. + Config for STREAMING_SHOT_CHANGE_DETECTION. label_detection_config: - Config for STREAMING\_LABEL\_DETECTION. 
+ Config for STREAMING_LABEL_DETECTION. explicit_content_detection_config: - Config for STREAMING\_EXPLICIT\_CONTENT\_DETECTION. + Config for STREAMING_EXPLICIT_CONTENT_DETECTION. object_tracking_config: - Config for STREAMING\_OBJECT\_TRACKING. + Config for STREAMING_OBJECT_TRACKING. automl_classification_config: - Config for STREAMING\_AUTOML\_CLASSIFICATION. + Config for STREAMING_AUTOML_CLASSIFICATION. automl_object_tracking_config: - Config for STREAMING\_AUTOML\_OBJECT\_TRACKING. + Config for STREAMING_AUTOML_OBJECT_TRACKING. storage_config: Streaming storage option. By default: storage is disabled. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig) - ), + }, ) _sym_db.RegisterMessage(StreamingVideoConfig) @@ -6287,9 +6280,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=11839, serialized_end=12173, methods=[ @@ -6300,9 +6291,7 @@ containing_service=None, input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002\037"\032/v1p3beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' - ), + serialized_options=b'\202\323\344\223\002\037"\032/v1p3beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', ) ], ) @@ -6316,9 +6305,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService", file=DESCRIPTOR, index=1, - serialized_options=_b( - "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), + 
serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", serialized_start=12176, serialized_end=12477, methods=[ diff --git a/synth.metadata b/synth.metadata index d4f10923..847fb7de 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,25 +1,18 @@ { "sources": [ - { - "generator": { - "name": "artman", - "version": "2.0.0", - "dockerImage": "googleapis/artman@sha256:b3b47805231a305d0f40c4bf069df20f6a2635574e6d4259fac651d3f9f6e098" - } - }, { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-videointelligence.git", - "sha": "78ed59825bbb77942b1382981cd531b219201a67" + "remote": "https://github.com/googleapis/python-videointelligence.git", + "sha": "ce1e7defb7597f5afd3eb22b23259407382e5faa" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "ee9e8e4e67f06aba881392a1a2956fbe7a42d216", - "internalRef": "309481123" + "sha": "e0f9d9e1f9de890db765be46f45ca8490723e3eb", + "internalRef": "309824146" } }, { @@ -37,8 +30,7 @@ "apiName": "videointelligence", "apiVersion": "v1beta2", "language": "python", - "generator": "gapic", - "config": "google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml" + "generator": "bazel" } }, { @@ -47,8 +39,7 @@ "apiName": "videointelligence", "apiVersion": "v1p1beta1", "language": "python", - "generator": "gapic", - "config": "google/cloud/videointelligence/artman_videointelligence_v1p1beta1.yaml" + "generator": "bazel" } }, { @@ -57,8 +48,7 @@ "apiName": "videointelligence", "apiVersion": "v1p2beta1", "language": "python", - "generator": "gapic", - "config": "google/cloud/videointelligence/artman_videointelligence_v1p2beta1.yaml" + "generator": "bazel" } }, { @@ -67,8 +57,7 @@ "apiName": "videointelligence", "apiVersion": "v1p3beta1", "language": "python", - "generator": "gapic", - "config": "google/cloud/videointelligence/artman_videointelligence_v1p3beta1.yaml" + "generator": "bazel" } }, { @@ -77,8 
+66,7 @@ "apiName": "videointelligence", "apiVersion": "v1", "language": "python", - "generator": "gapic", - "config": "google/cloud/videointelligence/artman_videointelligence_v1.yaml" + "generator": "bazel" } } ] diff --git a/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py index b457c478..febc6a4c 100644 --- a/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py @@ -83,17 +83,17 @@ def test_annotate_video(self): client = videointelligence_v1beta2.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = video_intelligence_pb2.AnnotateVideoRequest( - input_uri=input_uri, features=features + features=features, input_uri=input_uri ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -114,10 +114,10 @@ def test_annotate_video_exception(self): client = videointelligence_v1beta2.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) exception = response.exception() assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py 
b/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py index 6dddff91..2e86709b 100644 --- a/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py +++ b/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py @@ -83,17 +83,17 @@ def test_annotate_video(self): client = videointelligence_v1p1beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = video_intelligence_pb2.AnnotateVideoRequest( - input_uri=input_uri, features=features + features=features, input_uri=input_uri ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -114,10 +114,10 @@ def test_annotate_video_exception(self): client = videointelligence_v1p1beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) exception = response.exception() assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py b/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py index 01f0ad5c..3de5951c 100644 --- a/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py +++ b/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py @@ 
-83,17 +83,17 @@ def test_annotate_video(self): client = videointelligence_v1p2beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = video_intelligence_pb2.AnnotateVideoRequest( - input_uri=input_uri, features=features + features=features, input_uri=input_uri ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -114,10 +114,10 @@ def test_annotate_video_exception(self): client = videointelligence_v1p2beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) exception = response.exception() assert exception.errors[0] == error diff --git a/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py b/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py index e6f9d431..4f07261a 100644 --- a/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py +++ b/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py @@ -83,17 +83,17 @@ def test_annotate_video(self): client = videointelligence_v1p3beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = 
"gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = video_intelligence_pb2.AnnotateVideoRequest( - input_uri=input_uri, features=features + features=features, input_uri=input_uri ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -114,10 +114,10 @@ def test_annotate_video_exception(self): client = videointelligence_v1p3beta1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" - response = client.annotate_video(input_uri=input_uri, features=features) + response = client.annotate_video(features, input_uri=input_uri) exception = response.exception() assert exception.errors[0] == error From 2a26908ba01a42e622fc0a1fa6b223de592a1dd1 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:01:14 -0700 Subject: [PATCH 02/17] chore(python): create thread_safety doc (#517) This will be included in index.rst files. 
See https://github.com/googleapis/python-bigtable/pull/26/files Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue May 5 10:06:52 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 09c48461232ce929c34386259eb59018ad2d8eef Source-Link: https://github.com/googleapis/synthtool/commit/09c48461232ce929c34386259eb59018ad2d8eef --- docs/multiprocessing.rst | 7 +++++++ synth.metadata | 6 +++--- 2 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 docs/multiprocessing.rst diff --git a/docs/multiprocessing.rst b/docs/multiprocessing.rst new file mode 100644 index 00000000..1cb29d4c --- /dev/null +++ b/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. 
diff --git a/synth.metadata b/synth.metadata index 847fb7de..3e1fb32a 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e0f9d9e1f9de890db765be46f45ca8490723e3eb", - "internalRef": "309824146" + "sha": "1b5a8d2bbb69c5a04db26bd546d2888e609c6bab", + "internalRef": "309845930" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "04cb397eb7590ea1e6c10a39fca3d8fe0fb3d256" + "sha": "09c48461232ce929c34386259eb59018ad2d8eef" } } ], From 8646aece4c2ace25e5d8cdccbea9d93b3c437fd1 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:02:40 -0700 Subject: [PATCH 03/17] chore(python): remove extra #!/bin/bash (#538) Co-authored-by: Jeffrey Rennie Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue May 12 09:51:12 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 7482e79a82e353248769d819788adc1213e8c207 Source-Link: https://github.com/googleapis/synthtool/commit/7482e79a82e353248769d819788adc1213e8c207 --- .kokoro/publish-docs.sh | 2 -- synth.metadata | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 4c9dbc7f..c319f53f 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Disable buffering, so that the logs stream through. 
diff --git a/synth.metadata b/synth.metadata index 3e1fb32a..b4a6d243 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "1b5a8d2bbb69c5a04db26bd546d2888e609c6bab", - "internalRef": "309845930" + "sha": "edd3b80fb770548d6ad780105f1782de6ff73ea0", + "internalRef": "311053644" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "09c48461232ce929c34386259eb59018ad2d8eef" + "sha": "7482e79a82e353248769d819788adc1213e8c207" } } ], From d1651d2a2dd404d41675f3032b0548984346b981 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:04:37 -0700 Subject: [PATCH 04/17] chore!: enable gapicv2 for monitoring/v3 API This introduces the following breakages upon code regeneration, which are due to the `TimeSeriesQueryLanguageCondition` type being removed from `alert.proto` in a preceding change. ``` [ERROR] 6011: com.google.monitoring.v3.AlertPolicy$Condition: Field CONDITION_TIME_SERIES_QUERY_LANGUAGE_FIELD_NUMBER has been removed, but it was previously a constant [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition getConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageConditionOrBuilder getConditionTimeSeriesQueryLanguageOrBuilder()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition: Method 'public boolean hasConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$Builder clearConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: 
com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition getConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition$Builder getConditionTimeSeriesQueryLanguageBuilder()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageConditionOrBuilder getConditionTimeSeriesQueryLanguageOrBuilder()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public boolean hasConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$Builder mergeConditionTimeSeriesQueryLanguage(com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition)' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$Builder setConditionTimeSeriesQueryLanguage(com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition)' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$Condition$Builder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$Builder setConditionTimeSeriesQueryLanguage(com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition$Builder)' has been removed [ERROR] 6001: com.google.monitoring.v3.AlertPolicy$Condition$ConditionCase: Removed field CONDITION_TIME_SERIES_QUERY_LANGUAGE [ERROR] 8001: com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition: Class com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition removed [ERROR] 8001: 
com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition$Builder: Class com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition$Builder removed [ERROR] 8001: com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageConditionOrBuilder: Class com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageConditionOrBuilder removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$ConditionOrBuilder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageCondition getConditionTimeSeriesQueryLanguage()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$ConditionOrBuilder: Method 'public com.google.monitoring.v3.AlertPolicy$Condition$TimeSeriesQueryLanguageConditionOrBuilder getConditionTimeSeriesQueryLanguageOrBuilder()' has been removed [ERROR] 7002: com.google.monitoring.v3.AlertPolicy$ConditionOrBuilder: Method 'public boolean hasConditionTimeSeriesQueryLanguage()' has been removed ``` Committer: @miraleung PiperOrigin-RevId: 311581003 Source-Author: Google APIs Source-Date: Thu May 14 12:13:51 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 123873dc7e2be35252d172205f67a375e70e9747 Source-Link: https://github.com/googleapis/googleapis/commit/123873dc7e2be35252d172205f67a375e70e9747 --- .../proto/video_intelligence_pb2.py | 4 ++-- .../proto/video_intelligence_pb2.py | 6 +++--- synth.metadata | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index 6cee1805..c1df7128 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -2098,8 +2098,8 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": 
"""Normalized bounding box. The normalized vertex coordinates are relative - to the original image. Range: [0, 1]. + "__doc__": """Normalized bounding box. The normalized vertex coordinates + are relative to the original image. Range: [0, 1]. Attributes: diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index 63dc0539..1b8de3fb 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -2613,9 +2613,9 @@ { "DESCRIPTOR": _TEXTANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Annotations related to one detected OCR text snippet. This - will contain the corresponding text, confidence value, and frame level - information for each detection. + "__doc__": """Annotations related to one detected OCR text snippet. This will contain + the corresponding text, confidence value, and frame level information + for each detection. Attributes: diff --git a/synth.metadata b/synth.metadata index b4a6d243..bdfcf5d3 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "edd3b80fb770548d6ad780105f1782de6ff73ea0", - "internalRef": "311053644" + "sha": "123873dc7e2be35252d172205f67a375e70e9747", + "internalRef": "311581003" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "7482e79a82e353248769d819788adc1213e8c207" + "sha": "f395615039665af6599f69305efcd886685e74f9" } } ], From 66c9c7c2c7bdf6566658e21bcbe16264ce119065 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:05:24 -0700 Subject: [PATCH 05/17] fix: restore GAPIC v2 retry configs Restores GAPIC v2 retry config overrides that were mistakenly removed during migration. 
PiperOrigin-RevId: 312088359 Source-Author: Google APIs Source-Date: Mon May 18 08:57:12 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 5a90d467aa65e7f038f87585e8fbb45d74475e7c Source-Link: https://github.com/googleapis/googleapis/commit/5a90d467aa65e7f038f87585e8fbb45d74475e7c --- .../video_intelligence_service_client_config.py | 12 ++++++------ synth.metadata | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py index 70a9b881..74dc2121 100644 --- a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py +++ b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py @@ -7,19 +7,19 @@ }, "retry_params": { "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 120000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 120000, "total_timeout_millis": 600000, } }, "methods": { "AnnotateVideo": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", + "retry_codes_name": "idempotent", "retry_params_name": "default", } }, diff --git a/synth.metadata b/synth.metadata index bdfcf5d3..81c88a64 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "123873dc7e2be35252d172205f67a375e70e9747", - "internalRef": "311581003" + "sha": "5a90d467aa65e7f038f87585e8fbb45d74475e7c", + "internalRef": "312088359" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": 
"f395615039665af6599f69305efcd886685e74f9" + "sha": "7136daa5687f2a5bdcbba2cb25457f56fdf85d59" } } ], From fe1f278e80ffe44b3b43fbf83636b82586428bde Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:05:42 -0700 Subject: [PATCH 06/17] feat(v1p3beta1): added support for streaming automl action recognition. PiperOrigin-RevId: 312101156 Source-Author: Google APIs Source-Date: Mon May 18 10:04:40 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: d1a9f02fd4fb263bae0383b4a5af0bbef33753d6 Source-Link: https://github.com/googleapis/googleapis/commit/d1a9f02fd4fb263bae0383b4a5af0bbef33753d6 --- .../gapic/enums.py | 2 + ...aming_video_intelligence_service_client.py | 2 +- .../video_intelligence_service_client.py | 24 +- .../proto/video_intelligence.proto | 457 +++--- .../proto/video_intelligence_pb2.py | 1310 +++++++++-------- .../proto/video_intelligence_pb2_grpc.py | 8 +- synth.metadata | 4 +- 7 files changed, 962 insertions(+), 845 deletions(-) diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/enums.py b/google/cloud/videointelligence_v1p3beta1/gapic/enums.py index f32e7a2e..b4ba7b25 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/enums.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/enums.py @@ -98,6 +98,7 @@ class StreamingFeature(enum.IntEnum): STREAMING_SHOT_CHANGE_DETECTION (int): Shot change detection. STREAMING_EXPLICIT_CONTENT_DETECTION (int): Explicit content detection. STREAMING_OBJECT_TRACKING (int): Object detection and tracking. + STREAMING_AUTOML_ACTION_RECOGNITION (int): Action recognition based on AutoML model. STREAMING_AUTOML_CLASSIFICATION (int): Video classification based on AutoML model. STREAMING_AUTOML_OBJECT_TRACKING (int): Object detection and tracking based on AutoML model. 
""" @@ -107,5 +108,6 @@ class StreamingFeature(enum.IntEnum): STREAMING_SHOT_CHANGE_DETECTION = 2 STREAMING_EXPLICIT_CONTENT_DETECTION = 3 STREAMING_OBJECT_TRACKING = 4 + STREAMING_AUTOML_ACTION_RECOGNITION = 23 STREAMING_AUTOML_CLASSIFICATION = 21 STREAMING_AUTOML_OBJECT_TRACKING = 22 diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py index 27fd9a98..46b5a988 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py @@ -46,7 +46,7 @@ class StreamingVideoIntelligenceServiceClient(object): - """Service that implements streaming Google Cloud Video Intelligence API.""" + """Service that implements streaming Video Intelligence API.""" SERVICE_ADDRESS = "videointelligence.googleapis.com:443" """The default address of the service.""" diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py index e163ecac..6951e17c 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py @@ -47,7 +47,7 @@ class VideoIntelligenceServiceClient(object): - """Service that implements Google Cloud Video Intelligence API.""" + """Service that implements the Video Intelligence API.""" SERVICE_ADDRESS = "videointelligence.googleapis.com:443" """The default address of the service.""" @@ -232,33 +232,33 @@ def annotate_video( Args: features (list[~google.cloud.videointelligence_v1p3beta1.types.Feature]): Required. Requested video annotation features. - input_uri (str): Input video location. 
Currently, only `Google Cloud - Storage `__ URIs are supported, which + input_uri (str): Input video location. Currently, only `Cloud + Storage `__ URIs are supported. URIs must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request - URIs `__. A - video URI may include wildcards in ``object-id``, and thus identify - multiple videos. Supported wildcards: '*' to match 0 or more characters; + URIs `__. To + identify multiple videos, a video URI may include wildcards in the + ``object-id``. Supported wildcards: '*' to match 0 or more characters; '?' to match 1 character. If unset, the input video should be embedded - in the request as ``input_content``. If set, ``input_content`` should be + in the request as ``input_content``. If set, ``input_content`` must be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be - specified via ``input_uri``. If set, ``input_uri`` should be unset. + specified via the ``input_uri``. If set, ``input_uri`` must be unset. video_context (Union[dict, ~google.cloud.videointelligence_v1p3beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p3beta1.types.VideoContext` output_uri (str): Optional. Location where the output (in JSON format) should be - stored. Currently, only `Google Cloud - Storage `__ URIs are supported, which + stored. Currently, only `Cloud + Storage `__ URIs are supported. These must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs `__. location_id (str): Optional. Cloud region where annotation should take place. Supported - cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, - ``asia-east1``. 
If no region is specified, a region will be determined + cloud regions are: ``us-east1``, ``us-west1``, ``europe-west1``, + ``asia-east1``. If no region is specified, the region will be determined based on video file location. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto index 3d418e2f..6284e0db 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -32,7 +31,7 @@ option java_outer_classname = "VideoIntelligenceServiceProto"; option java_package = "com.google.cloud.videointelligence.v1p3beta1"; option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1"; -// Service that implements Google Cloud Video Intelligence API. +// Service that implements the Video Intelligence API. service VideoIntelligenceService { option (google.api.default_host) = "videointelligence.googleapis.com"; option (google.api.oauth_scopes) = @@ -56,7 +55,7 @@ service VideoIntelligenceService { } } -// Service that implements streaming Google Cloud Video Intelligence API. +// Service that implements streaming Video Intelligence API. 
service StreamingVideoIntelligenceService { option (google.api.default_host) = "videointelligence.googleapis.com"; option (google.api.oauth_scopes) = @@ -72,20 +71,21 @@ service StreamingVideoIntelligenceService { // Video annotation request. message AnnotateVideoRequest { // Input video location. Currently, only - // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are - // supported, which must be specified in the following format: + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. URIs must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded - // in the request as `input_content`. If set, `input_content` should be unset. + // in the request as `input_content`. If set, `input_content` must be unset. string input_uri = 1; // The video data bytes. - // If unset, the input video(s) should be specified via `input_uri`. - // If set, `input_uri` should be unset. + // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. bytes input_content = 6; // Required. Requested video annotation features. @@ -95,16 +95,18 @@ message AnnotateVideoRequest { VideoContext video_context = 3; // Optional. Location where the output (in JSON format) should be stored. 
- // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) - // URIs are supported, which must be specified in the following format: + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. These must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. Cloud region where annotation should take place. Supported cloud - // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region - // is specified, a region will be determined based on video file location. + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } @@ -140,6 +142,42 @@ message VideoContext { ObjectTrackingConfig object_tracking_config = 13; } +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + // Config for LABEL_DETECTION. 
message LabelDetectionConfig { // What labels should be detected with LABEL_DETECTION, in addition to @@ -147,9 +185,9 @@ message LabelDetectionConfig { // If unspecified, defaults to `SHOT_MODE`. LabelDetectionMode label_detection_mode = 1; - // Whether the video has been shot from a stationary (i.e. non-moving) camera. - // When set to true, might improve detection accuracy for moving objects. - // Should be used with `SHOT_AND_FRAME_MODE` enabled. + // Whether the video has been shot from a stationary (i.e., non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. bool stationary_camera = 2; // Model to use for label detection. @@ -161,19 +199,82 @@ message LabelDetectionConfig { // frame-level detection. If not set, it is set to 0.4 by default. The valid // range for this threshold is [0.1, 0.9]. Any value set outside of this // range will be clipped. - // Note: for best results please follow the default threshold. We will update + // Note: For best results, follow the default threshold. We will update // the default threshold everytime when we release a new model. float frame_confidence_threshold = 4; // The confidence threshold we perform filtering on the labels from - // video-level and shot-level detections. If not set, it is set to 0.3 by + // video-level and shot-level detections. If not set, it's set to 0.3 by // default. The valid range for this threshold is [0.1, 0.9]. Any value set // outside of this range will be clipped. - // Note: for best results please follow the default threshold. We will update + // Note: For best results, follow the default threshold. We will update // the default threshold everytime when we release a new model. float video_confidence_threshold = 5; } +// Streaming video annotation feature. +enum StreamingFeature { + // Unspecified. + STREAMING_FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. 
+ STREAMING_LABEL_DETECTION = 1; + + // Shot change detection. + STREAMING_SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + STREAMING_EXPLICIT_CONTENT_DETECTION = 3; + + // Object detection and tracking. + STREAMING_OBJECT_TRACKING = 4; + + // Action recognition based on AutoML model. + STREAMING_AUTOML_ACTION_RECOGNITION = 23; + + // Video classification based on AutoML model. + STREAMING_AUTOML_CLASSIFICATION = 21; + + // Object detection and tracking based on AutoML model. + STREAMING_AUTOML_OBJECT_TRACKING = 22; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection. + FACE_DETECTION = 4; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; + + // Logo detection, tracking, and recognition. + LOGO_RECOGNITION = 12; + + // Celebrity recognition. + CELEBRITY_RECOGNITION = 13; + + // Person detection. + PERSON_DETECTION = 14; +} + // Config for SHOT_CHANGE_DETECTION. message ShotChangeDetectionConfig { // Model to use for shot change detection. @@ -205,28 +306,28 @@ message FaceDetectionConfig { // "builtin/latest". string model = 1; - // Whether bounding boxes be included in the face annotation output. + // Whether bounding boxes are included in the face annotation output. bool include_bounding_boxes = 2; // Whether to enable face attributes detection, such as glasses, dark_glasses, - // mouth_open etc. Ignored if 'include_bounding_boxes' is false. + // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. bool include_attributes = 5; } // Config for PERSON_DETECTION. 
message PersonDetectionConfig { - // Whether bounding boxes be included in the person detection annotation + // Whether bounding boxes are included in the person detection annotation // output. bool include_bounding_boxes = 1; // Whether to enable pose landmarks detection. Ignored if - // 'include_bounding_boxes' is false. + // 'include_bounding_boxes' is set to false. bool include_pose_landmarks = 2; // Whether to enable person attributes detection, such as cloth color (black, - // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair - // color (black, blonde, etc), hair length (long, short, bald), etc. - // Ignored if 'include_bounding_boxes' is false. + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, + // etc. + // Ignored if 'include_bounding_boxes' is set to false. bool include_attributes = 3; } @@ -282,7 +383,7 @@ message Entity { // API](https://developers.google.com/knowledge-graph/). string entity_id = 1; - // Textual description, e.g. `Fixed-gear bicycle`. + // Textual description, e.g., `Fixed-gear bicycle`. string description = 2; // Language code for `description` in BCP-47 format. @@ -295,9 +396,9 @@ message LabelAnnotation { Entity entity = 1; // Common categories for the detected entity. - // E.g. when the label is `Terrier` the category is likely `dog`. And in some - // cases there might be more than one categories e.g. `Terrier` could also be - // a `pet`. + // For example, when the label is `Terrier`, the category is likely `dog`. And + // in some cases there might be more than one categories e.g., `Terrier` could + // also be a `pet`. repeated Entity category_entities = 2; // All video segments where a label was detected. @@ -380,7 +481,7 @@ message Track { // A generic detected attribute represented by name in string format. message DetectedAttribute { - // The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc. 
+ // The name of the attribute, for example, glasses, dark_glasses, mouth_open. // A full list of supported type names will be provided in the document. string name = 1; @@ -437,7 +538,7 @@ message CelebrityRecognitionAnnotation { // A generic detected landmark represented by name in string format and a 2D // location. message DetectedLandmark { - // The name of this landmark, i.e. left_hand, right_shoulder. + // The name of this landmark, for example, left_hand, right_shoulder. string name = 1; // The 2D point of the detected landmark using the normalized image @@ -459,24 +560,24 @@ message FaceDetectionAnnotation { // Person detection annotation per video. message PersonDetectionAnnotation { - // The trackes that a person is detected. + // The detected tracks of a person. repeated Track tracks = 1; } // Annotation results for a single video. message VideoAnnotationResults { // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). + // [Cloud Storage](https://cloud.google.com/storage/). string input_uri = 1; // Video segment on which the annotation is run. VideoSegment segment = 10; - // Topical label annotations on video level or user specified segment level. + // Topical label annotations on video level or user-specified segment level. // There is exactly one element for each unique label. repeated LabelAnnotation segment_label_annotations = 2; - // Presence label annotations on video level or user specified segment level. + // Presence label annotations on video level or user-specified segment level. // There is exactly one element for each unique label. Compared to the // existing topical `segment_label_annotations`, this field presents more // fine-grained, segment-level labels detected in video content and is made @@ -544,7 +645,7 @@ message AnnotateVideoResponse { // Annotation progress for a single video. 
message VideoAnnotationProgress { // Video file location in - // [Google Cloud Storage](https://cloud.google.com/storage/). + // [Cloud Storage](https://cloud.google.com/storage/). string input_uri = 1; // Approximate percentage processed thus far. Guaranteed to be @@ -558,11 +659,11 @@ message VideoAnnotationProgress { google.protobuf.Timestamp update_time = 4; // Specifies which feature is being tracked if the request contains more than - // one features. + // one feature. Feature feature = 5; // Specifies which segment is being tracked if the request contains more than - // one segments. + // one segment. VideoSegment segment = 6; } @@ -617,7 +718,7 @@ message SpeechTranscriptionConfig { // the top alternative of the recognition result using a speaker_tag provided // in the WordInfo. // Note: When this is true, we send all the words from the beginning of the - // audio for the top alternative in every consecutive responses. + // audio for the top alternative in every consecutive response. // This is done in order to improve our speaker tags as our models learn to // identify the speakers in the conversation over time. bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; @@ -673,8 +774,8 @@ message SpeechRecognitionAlternative { float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A list of word-specific information for each recognized word. - // Note: When `enable_speaker_diarization` is true, you will see all the words - // from the beginning of the audio. + // Note: When `enable_speaker_diarization` is set to true, you will see all + // the words from the beginning of the audio. repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; } @@ -792,18 +893,6 @@ message ObjectTrackingFrame { // Annotations corresponding to one tracked object. message ObjectTrackingAnnotation { - // Entity to specify the object category that this track is labeled as. 
- Entity entity = 1; - - // Object category's labeling confidence of this track. - float confidence = 4; - - // Information corresponding to all frames where this object track appears. - // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame - // messages in frames. - // Streaming mode: it can only be one ObjectTrackingFrame message in frames. - repeated ObjectTrackingFrame frames = 2; - // Different representation of tracking info in non-streaming batch // and streaming modes. oneof track_info { @@ -819,6 +908,18 @@ message ObjectTrackingAnnotation { // ObjectTrackAnnotation of the same track_id over time. int64 track_id = 5; } + + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + // messages in frames. + // Streaming mode: it can only be one ObjectTrackingFrame message in frames. + repeated ObjectTrackingFrame frames = 2; } // Annotation corresponding to one detected, tracked and recognized logo class. @@ -860,95 +961,9 @@ message StreamingAnnotateVideoRequest { } } -// `StreamingAnnotateVideoResponse` is the only message returned to the client -// by `StreamingAnnotateVideo`. A series of zero or more -// `StreamingAnnotateVideoResponse` messages are streamed back to the client. -message StreamingAnnotateVideoResponse { - // If set, returns a [google.rpc.Status][google.rpc.Status] message that - // specifies the error for the operation. - google.rpc.Status error = 1; - - // Streaming annotation results. - StreamingVideoAnnotationResults annotation_results = 2; - - // GCS URI that stores annotation results of one streaming session. - // It is a directory that can hold multiple files in JSON format. 
- // Example uri format: - // gs://bucket_id/object_id/cloud_project_name-session_id - string annotation_results_uri = 3; -} - -// Config for STREAMING_AUTOML_CLASSIFICATION. -message StreamingAutomlClassificationConfig { - // Resource name of AutoML model. - // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` - string model_name = 1; -} - -// Config for STREAMING_AUTOML_OBJECT_TRACKING. -message StreamingAutomlObjectTrackingConfig { - // Resource name of AutoML model. - // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` - string model_name = 1; -} - -// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. -message StreamingExplicitContentDetectionConfig {} - -// Config for STREAMING_LABEL_DETECTION. -message StreamingLabelDetectionConfig { - // Whether the video has been captured from a stationary (i.e. non-moving) - // camera. When set to true, might improve detection accuracy for moving - // objects. Default: false. - bool stationary_camera = 1; -} - -// Config for STREAMING_OBJECT_TRACKING. -message StreamingObjectTrackingConfig {} - -// Config for STREAMING_SHOT_CHANGE_DETECTION. -message StreamingShotChangeDetectionConfig {} - -// Config for streaming storage option. -message StreamingStorageConfig { - // Enable streaming storage. Default: false. - bool enable_storage_annotation_result = 1; - - // GCS URI to store all annotation results for one client. Client should - // specify this field as the top-level storage directory. Annotation results - // of different sessions will be put into different sub-directories denoted - // by project_name and session_id. All sub-directories will be auto generated - // by program and will be made accessible to client in response proto. - // URIs must be specified in the following format: `gs://bucket-id/object-id` - // `bucket-id` should be a valid GCS bucket created by client and bucket - // permission shall also be configured properly. 
`object-id` can be arbitrary - // string that make sense to client. Other URI formats will return error and - // cause GCS write failure. - string annotation_result_storage_directory = 3; -} - -// Streaming annotation results corresponding to a portion of the video -// that is currently being processed. -message StreamingVideoAnnotationResults { - // Shot annotation results. Each shot is represented as a video segment. - repeated VideoSegment shot_annotations = 1; - - // Label annotation results. - repeated LabelAnnotation label_annotations = 2; - - // Explicit content annotation results. - ExplicitContentAnnotation explicit_annotation = 3; - - // Object tracking results. - repeated ObjectTrackingAnnotation object_annotations = 4; -} - // Provides information to the annotator that specifies how to process the // request. message StreamingVideoConfig { - // Requested annotation feature. - StreamingFeature feature = 1; - // Config for requested annotation feature. oneof streaming_config { // Config for STREAMING_SHOT_CHANGE_DETECTION. @@ -964,6 +979,10 @@ message StreamingVideoConfig { // Config for STREAMING_OBJECT_TRACKING. StreamingObjectTrackingConfig object_tracking_config = 5; + // Config for STREAMING_AUTOML_ACTION_RECOGNITION. + StreamingAutomlActionRecognitionConfig automl_action_recognition_config = + 23; + // Config for STREAMING_AUTOML_CLASSIFICATION. StreamingAutomlClassificationConfig automl_classification_config = 21; @@ -971,102 +990,100 @@ message StreamingVideoConfig { StreamingAutomlObjectTrackingConfig automl_object_tracking_config = 22; } + // Requested annotation feature. + StreamingFeature feature = 1; + // Streaming storage option. By default: storage is disabled. StreamingStorageConfig storage_config = 30; } -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. 
- SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Human face detection. - FACE_DETECTION = 4; - - // Speech transcription. - SPEECH_TRANSCRIPTION = 6; - - // OCR text detection and tracking. - TEXT_DETECTION = 7; - - // Object detection and tracking. - OBJECT_TRACKING = 9; - - // Logo detection, tracking, and recognition. - LOGO_RECOGNITION = 12; +// `StreamingAnnotateVideoResponse` is the only message returned to the client +// by `StreamingAnnotateVideo`. A series of zero or more +// `StreamingAnnotateVideoResponse` messages are streamed back to the client. +message StreamingAnnotateVideoResponse { + // If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. + google.rpc.Status error = 1; - // Celebrity recognition. - CELEBRITY_RECOGNITION = 13; + // Streaming annotation results. + StreamingVideoAnnotationResults annotation_results = 2; - // Person detection. - PERSON_DETECTION = 14; + // Google Cloud Storage(GCS) URI that stores annotation results of one + // streaming session in JSON format. + // It is the annotation_result_storage_directory + // from the request followed by '/cloud_project_number-session_id'. + string annotation_results_uri = 3; } -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; +// Streaming annotation results corresponding to a portion of the video +// that is currently being processed. +message StreamingVideoAnnotationResults { + // Shot annotation results. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 1; - // Detect shot-level labels. - SHOT_MODE = 1; + // Label annotation results. + repeated LabelAnnotation label_annotations = 2; - // Detect frame-level labels. - FRAME_MODE = 2; + // Explicit content annotation results. + ExplicitContentAnnotation explicit_annotation = 3; - // Detect both shot-level and frame-level labels. 
- SHOT_AND_FRAME_MODE = 3; + // Object tracking results. + repeated ObjectTrackingAnnotation object_annotations = 4; } -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; +// Config for STREAMING_SHOT_CHANGE_DETECTION. +message StreamingShotChangeDetectionConfig {} - // Very likely. - VERY_LIKELY = 5; +// Config for STREAMING_LABEL_DETECTION. +message StreamingLabelDetectionConfig { + // Whether the video has been captured from a stationary (i.e. non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Default: false. + bool stationary_camera = 1; } -// Streaming video annotation feature. -enum StreamingFeature { - // Unspecified. - STREAMING_FEATURE_UNSPECIFIED = 0; +// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. +message StreamingExplicitContentDetectionConfig {} - // Label detection. Detect objects, such as dog or flower. - STREAMING_LABEL_DETECTION = 1; +// Config for STREAMING_OBJECT_TRACKING. +message StreamingObjectTrackingConfig {} - // Shot change detection. - STREAMING_SHOT_CHANGE_DETECTION = 2; +// Config for STREAMING_AUTOML_ACTION_RECOGNITION. +message StreamingAutomlActionRecognitionConfig { + // Resource name of AutoML model. + // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} - // Explicit content detection. - STREAMING_EXPLICIT_CONTENT_DETECTION = 3; +// Config for STREAMING_AUTOML_CLASSIFICATION. +message StreamingAutomlClassificationConfig { + // Resource name of AutoML model. + // Format: + // `projects/{project_number}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} - // Object detection and tracking. - STREAMING_OBJECT_TRACKING = 4; +// Config for STREAMING_AUTOML_OBJECT_TRACKING. 
+message StreamingAutomlObjectTrackingConfig { + // Resource name of AutoML model. + // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` + string model_name = 1; +} - // Video classification based on AutoML model. - STREAMING_AUTOML_CLASSIFICATION = 21; +// Config for streaming storage option. +message StreamingStorageConfig { + // Enable streaming storage. Default: false. + bool enable_storage_annotation_result = 1; - // Object detection and tracking based on AutoML model. - STREAMING_AUTOML_OBJECT_TRACKING = 22; + // Cloud Storage URI to store all annotation results for one client. Client + // should specify this field as the top-level storage directory. Annotation + // results of different sessions will be put into different sub-directories + // denoted by project_name and session_id. All sub-directories will be auto + // generated by program and will be made accessible to client in response + // proto. URIs must be specified in the following format: + // `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage + // bucket created by client and bucket permission shall also be configured + // properly. `object-id` can be arbitrary string that make sense to client. + // Other URI formats will return error and cause Cloud Storage write failure. 
+ string annotation_result_storage_directory = 3; } diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index 8f67f8d3..76eeb5ab 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -29,7 +29,7 @@ package="google.cloud.videointelligence.v1p3beta1", syntax="proto3", serialized_options=b"\n,com.google.cloud.videointelligence.v1p3beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P3Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p3beta1", - serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 
\x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 
\x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 
\x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 
\x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x42\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t")\n\'StreamingExplicitContentDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08"\x1f\n\x1dStreamingObjectTrackingConfig"$\n"StreamingShotChangeDetectionConfig"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"\x8c\x07\n\x14StreamingVideoConfig\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 
\x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\x8d\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n STREAMING_AUTOML_OBJECT_TRACKING\x10\x16\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3', + 
serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r 
\x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 
\x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 
\x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 
\x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\x8a\x08\n\x14StreamingVideoConfig\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 
\x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12|\n automl_action_recognition_config\x18\x17 \x01(\x0b\x32P.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"$\n"StreamingShotChangeDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 
\x01(\x08")\n\'StreamingExplicitContentDetectionConfig"\x1f\n\x1dStreamingObjectTrackingConfig"<\n&StreamingAutomlActionRecognitionConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\xb6\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12\'\n#STREAMING_AUTOML_ACTION_RECOGNITION\x10\x17\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n 
STREAMING_AUTOML_OBJECT_TRACKING\x10\x16*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -41,90 +41,6 @@ ], ) -_FEATURE = _descriptor.EnumDescriptor( - name="Feature", - full_name="google.cloud.videointelligence.v1p3beta1.Feature", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="FEATURE_UNSPECIFIED", - 
index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="LABEL_DETECTION", - index=1, - number=1, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="SHOT_CHANGE_DETECTION", - index=2, - number=2, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="EXPLICIT_CONTENT_DETECTION", - index=3, - number=3, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="SPEECH_TRANSCRIPTION", - index=5, - number=6, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="OBJECT_TRACKING", - index=7, - number=9, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="LOGO_RECOGNITION", - index=8, - number=12, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CELEBRITY_RECOGNITION", - index=9, - number=13, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="PERSON_DETECTION", - index=10, - number=14, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=11058, - serialized_end=11330, -) -_sym_db.RegisterEnumDescriptor(_FEATURE) - -Feature = enum_type_wrapper.EnumTypeWrapper(_FEATURE) _LABELDETECTIONMODE = _descriptor.EnumDescriptor( name="LabelDetectionMode", full_name="google.cloud.videointelligence.v1p3beta1.LabelDetectionMode", @@ -154,8 +70,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=11332, - serialized_end=11446, + serialized_start=11245, + serialized_end=11359, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -191,8 +107,8 @@ ], containing_type=None, 
serialized_options=None, - serialized_start=11448, - serialized_end=11564, + serialized_start=11361, + serialized_end=11477, ) _sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ -239,15 +155,22 @@ type=None, ), _descriptor.EnumValueDescriptor( - name="STREAMING_AUTOML_CLASSIFICATION", + name="STREAMING_AUTOML_ACTION_RECOGNITION", index=5, + number=23, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="STREAMING_AUTOML_CLASSIFICATION", + index=6, number=21, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( name="STREAMING_AUTOML_OBJECT_TRACKING", - index=6, + index=7, number=22, serialized_options=None, type=None, @@ -255,23 +178,96 @@ ], containing_type=None, serialized_options=None, - serialized_start=11567, - serialized_end=11836, + serialized_start=11480, + serialized_end=11790, ) _sym_db.RegisterEnumDescriptor(_STREAMINGFEATURE) StreamingFeature = enum_type_wrapper.EnumTypeWrapper(_STREAMINGFEATURE) -FEATURE_UNSPECIFIED = 0 -LABEL_DETECTION = 1 -SHOT_CHANGE_DETECTION = 2 -EXPLICIT_CONTENT_DETECTION = 3 -FACE_DETECTION = 4 -SPEECH_TRANSCRIPTION = 6 -TEXT_DETECTION = 7 -OBJECT_TRACKING = 9 -LOGO_RECOGNITION = 12 -CELEBRITY_RECOGNITION = 13 -PERSON_DETECTION = 14 +_FEATURE = _descriptor.EnumDescriptor( + name="Feature", + full_name="google.cloud.videointelligence.v1p3beta1.Feature", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="FEATURE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="LABEL_DETECTION", + index=1, + number=1, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="SHOT_CHANGE_DETECTION", + index=2, + number=2, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="EXPLICIT_CONTENT_DETECTION", + index=3, + number=3, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + 
name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="SPEECH_TRANSCRIPTION", + index=5, + number=6, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="OBJECT_TRACKING", + index=7, + number=9, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="LOGO_RECOGNITION", + index=8, + number=12, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="CELEBRITY_RECOGNITION", + index=9, + number=13, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="PERSON_DETECTION", + index=10, + number=14, + serialized_options=None, + type=None, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=11793, + serialized_end=12065, +) +_sym_db.RegisterEnumDescriptor(_FEATURE) + +Feature = enum_type_wrapper.EnumTypeWrapper(_FEATURE) LABEL_DETECTION_MODE_UNSPECIFIED = 0 SHOT_MODE = 1 FRAME_MODE = 2 @@ -287,8 +283,20 @@ STREAMING_SHOT_CHANGE_DETECTION = 2 STREAMING_EXPLICIT_CONTENT_DETECTION = 3 STREAMING_OBJECT_TRACKING = 4 +STREAMING_AUTOML_ACTION_RECOGNITION = 23 STREAMING_AUTOML_CLASSIFICATION = 21 STREAMING_AUTOML_OBJECT_TRACKING = 22 +FEATURE_UNSPECIFIED = 0 +LABEL_DETECTION = 1 +SHOT_CHANGE_DETECTION = 2 +EXPLICIT_CONTENT_DETECTION = 3 +FACE_DETECTION = 4 +SPEECH_TRANSCRIPTION = 6 +TEXT_DETECTION = 7 +OBJECT_TRACKING = 9 +LOGO_RECOGNITION = 12 +CELEBRITY_RECOGNITION = 13 +PERSON_DETECTION = 14 _ANNOTATEVIDEOREQUEST = _descriptor.Descriptor( @@ -3574,10 +3582,10 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="entity", - full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.entity", + name="segment", + full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.segment", 
index=0, - number=1, + number=3, type=11, cpp_type=10, label=1, @@ -3592,15 +3600,15 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="confidence", - full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.confidence", + name="track_id", + full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.track_id", index=1, - number=4, - type=2, - cpp_type=6, + number=5, + type=3, + cpp_type=2, label=1, has_default_value=False, - default_value=float(0), + default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -3610,15 +3618,15 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="frames", - full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.frames", + name="entity", + full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.entity", index=2, - number=2, + number=1, type=11, cpp_type=10, - label=3, + label=1, has_default_value=False, - default_value=[], + default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -3628,15 +3636,15 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="segment", - full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.segment", + name="confidence", + full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.confidence", index=3, - number=3, - type=11, - cpp_type=10, + number=4, + type=2, + cpp_type=6, label=1, has_default_value=False, - default_value=None, + default_value=float(0), message_type=None, enum_type=None, containing_type=None, @@ -3646,15 +3654,15 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="track_id", - full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.track_id", + name="frames", + full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.frames", index=4, - number=5, - type=3, - cpp_type=2, - label=1, + number=2, + type=11, + cpp_type=10, + label=3, has_default_value=False, - 
default_value=0, + default_value=[], message_type=None, enum_type=None, containing_type=None, @@ -3825,18 +3833,18 @@ ) -_STREAMINGANNOTATEVIDEORESPONSE = _descriptor.Descriptor( - name="StreamingAnnotateVideoResponse", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse", +_STREAMINGVIDEOCONFIG = _descriptor.Descriptor( + name="StreamingVideoConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="error", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.error", + name="shot_change_detection_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.shot_change_detection_config", index=0, - number=1, + number=2, type=11, cpp_type=10, label=1, @@ -3851,10 +3859,10 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="annotation_results", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.annotation_results", + name="label_detection_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.label_detection_config", index=1, - number=2, + number=3, type=11, cpp_type=10, label=1, @@ -3869,15 +3877,123 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="annotation_results_uri", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.annotation_results_uri", + name="explicit_content_detection_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.explicit_content_detection_config", index=2, - number=3, - type=9, - cpp_type=9, + number=4, + type=11, + cpp_type=10, label=1, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + 
_descriptor.FieldDescriptor( + name="object_tracking_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.object_tracking_config", + index=3, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="automl_action_recognition_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.automl_action_recognition_config", + index=4, + number=23, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="automl_classification_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.automl_classification_config", + index=5, + number=21, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="automl_object_tracking_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.automl_object_tracking_config", + index=6, + number=22, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="feature", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.feature", + index=7, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, 
+ default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="storage_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.storage_config", + index=8, + number=30, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -3894,24 +4010,68 @@ is_extendable=False, syntax="proto3", extension_ranges=[], - oneofs=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="streaming_config", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.streaming_config", + index=0, + containing_type=None, + fields=[], + ) + ], serialized_start=9139, - serialized_end=9341, + serialized_end=10173, ) -_STREAMINGAUTOMLCLASSIFICATIONCONFIG = _descriptor.Descriptor( - name="StreamingAutomlClassificationConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig", +_STREAMINGANNOTATEVIDEORESPONSE = _descriptor.Descriptor( + name="StreamingAnnotateVideoResponse", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="model_name", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig.model_name", + name="error", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.error", index=0, number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="annotation_results", + 
full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.annotation_results", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="annotation_results_uri", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse.annotation_results_uri", + index=2, + number=3, type=9, cpp_type=9, label=1, @@ -3924,7 +4084,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -3934,28 +4094,64 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9343, - serialized_end=9400, + serialized_start=10176, + serialized_end=10378, ) -_STREAMINGAUTOMLOBJECTTRACKINGCONFIG = _descriptor.Descriptor( - name="StreamingAutomlObjectTrackingConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig", +_STREAMINGVIDEOANNOTATIONRESULTS = _descriptor.Descriptor( + name="StreamingVideoAnnotationResults", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="model_name", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig.model_name", + name="shot_annotations", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.shot_annotations", index=0, number=1, - type=9, - cpp_type=9, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="label_annotations", + 
full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.label_annotations", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="explicit_annotation", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.explicit_annotation", + index=2, + number=3, + type=11, + cpp_type=10, label=1, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -3963,7 +4159,25 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), + _descriptor.FieldDescriptor( + name="object_annotations", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.object_annotations", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[], @@ -3973,14 +4187,14 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9402, - serialized_end=9459, + serialized_start=10381, + serialized_end=10776, ) -_STREAMINGEXPLICITCONTENTDETECTIONCONFIG = _descriptor.Descriptor( - name="StreamingExplicitContentDetectionConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig", +_STREAMINGSHOTCHANGEDETECTIONCONFIG = _descriptor.Descriptor( + name="StreamingShotChangeDetectionConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig", filename=None, file=DESCRIPTOR, containing_type=None, @@ -3993,8 +4207,8 @@ syntax="proto3", 
extension_ranges=[], oneofs=[], - serialized_start=9461, - serialized_end=9502, + serialized_start=10778, + serialized_end=10814, ) @@ -4032,14 +4246,14 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9504, - serialized_end=9562, + serialized_start=10816, + serialized_end=10874, ) -_STREAMINGOBJECTTRACKINGCONFIG = _descriptor.Descriptor( - name="StreamingObjectTrackingConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig", +_STREAMINGEXPLICITCONTENTDETECTIONCONFIG = _descriptor.Descriptor( + name="StreamingExplicitContentDetectionConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig", filename=None, file=DESCRIPTOR, containing_type=None, @@ -4052,14 +4266,14 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9564, - serialized_end=9595, + serialized_start=10876, + serialized_end=10917, ) -_STREAMINGSHOTCHANGEDETECTIONCONFIG = _descriptor.Descriptor( - name="StreamingShotChangeDetectionConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig", +_STREAMINGOBJECTTRACKINGCONFIG = _descriptor.Descriptor( + name="StreamingObjectTrackingConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig", filename=None, file=DESCRIPTOR, containing_type=None, @@ -4072,28 +4286,28 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9597, - serialized_end=9633, + serialized_start=10919, + serialized_end=10950, ) -_STREAMINGSTORAGECONFIG = _descriptor.Descriptor( - name="StreamingStorageConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig", +_STREAMINGAUTOMLACTIONRECOGNITIONCONFIG = _descriptor.Descriptor( + name="StreamingAutomlActionRecognitionConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ 
_descriptor.FieldDescriptor( - name="enable_storage_annotation_result", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig.enable_storage_annotation_result", + name="model_name", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig.model_name", index=0, number=1, - type=8, - cpp_type=7, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=False, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -4101,12 +4315,33 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ), + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=10952, + serialized_end=11012, +) + + +_STREAMINGAUTOMLCLASSIFICATIONCONFIG = _descriptor.Descriptor( + name="StreamingAutomlClassificationConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ _descriptor.FieldDescriptor( - name="annotation_result_storage_directory", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig.annotation_result_storage_directory", - index=1, - number=3, + name="model_name", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig.model_name", + index=0, + number=1, type=9, cpp_type=9, label=1, @@ -4119,7 +4354,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ), + ) ], extensions=[], nested_types=[], @@ -4129,82 +4364,28 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9635, - serialized_end=9746, + serialized_start=11014, + serialized_end=11071, ) -_STREAMINGVIDEOANNOTATIONRESULTS = _descriptor.Descriptor( - name="StreamingVideoAnnotationResults", - 
full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults", +_STREAMINGAUTOMLOBJECTTRACKINGCONFIG = _descriptor.Descriptor( + name="StreamingAutomlObjectTrackingConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="shot_annotations", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.shot_annotations", + name="model_name", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig.model_name", index=0, number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="label_annotations", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.label_annotations", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="explicit_annotation", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.explicit_annotation", - index=2, - number=3, - type=11, - cpp_type=10, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="object_annotations", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults.object_annotations", - index=3, - number=4, - type=11, - 
cpp_type=10, - label=3, - has_default_value=False, - default_value=[], + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -4212,7 +4393,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ), + ) ], extensions=[], nested_types=[], @@ -4222,28 +4403,28 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=9749, - serialized_end=10144, + serialized_start=11073, + serialized_end=11130, ) -_STREAMINGVIDEOCONFIG = _descriptor.Descriptor( - name="StreamingVideoConfig", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig", +_STREAMINGSTORAGECONFIG = _descriptor.Descriptor( + name="StreamingStorageConfig", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name="feature", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.feature", + name="enable_storage_annotation_result", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig.enable_storage_annotation_result", index=0, number=1, - type=14, - cpp_type=8, + type=8, + cpp_type=7, label=1, has_default_value=False, - default_value=0, + default_value=False, message_type=None, enum_type=None, containing_type=None, @@ -4253,123 +4434,15 @@ file=DESCRIPTOR, ), _descriptor.FieldDescriptor( - name="shot_change_detection_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.shot_change_detection_config", + name="annotation_result_storage_directory", + full_name="google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig.annotation_result_storage_directory", index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="label_detection_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.label_detection_config", - index=2, number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="explicit_content_detection_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.explicit_content_detection_config", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="object_tracking_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.object_tracking_config", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="automl_classification_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.automl_classification_config", - index=5, - number=21, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="automl_object_tracking_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.automl_object_tracking_config", - index=6, - number=22, - type=11, - cpp_type=10, - 
label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="storage_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.storage_config", - index=7, - number=30, - type=11, - cpp_type=10, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=None, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -4386,17 +4459,9 @@ is_extendable=False, syntax="proto3", extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="streaming_config", - full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.streaming_config", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=10147, - serialized_end=11055, + oneofs=[], + serialized_start=11132, + serialized_end=11243, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -4558,9 +4623,9 @@ _OBJECTTRACKINGFRAME.fields_by_name[ "time_offset" ].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_OBJECTTRACKINGANNOTATION.fields_by_name["segment"].message_type = _VIDEOSEGMENT _OBJECTTRACKINGANNOTATION.fields_by_name["entity"].message_type = _ENTITY _OBJECTTRACKINGANNOTATION.fields_by_name["frames"].message_type = _OBJECTTRACKINGFRAME -_OBJECTTRACKINGANNOTATION.fields_by_name["segment"].message_type = _VIDEOSEGMENT _OBJECTTRACKINGANNOTATION.oneofs_by_name["track_info"].fields.append( _OBJECTTRACKINGANNOTATION.fields_by_name["segment"] ) @@ -4591,25 +4656,6 @@ _STREAMINGANNOTATEVIDEOREQUEST.fields_by_name[ "input_content" ].containing_oneof = _STREAMINGANNOTATEVIDEOREQUEST.oneofs_by_name["streaming_request"] -_STREAMINGANNOTATEVIDEORESPONSE.fields_by_name[ - "error" -].message_type = google_dot_rpc_dot_status__pb2._STATUS 
-_STREAMINGANNOTATEVIDEORESPONSE.fields_by_name[ - "annotation_results" -].message_type = _STREAMINGVIDEOANNOTATIONRESULTS -_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ - "shot_annotations" -].message_type = _VIDEOSEGMENT -_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ - "label_annotations" -].message_type = _LABELANNOTATION -_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ - "explicit_annotation" -].message_type = _EXPLICITCONTENTANNOTATION -_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ - "object_annotations" -].message_type = _OBJECTTRACKINGANNOTATION -_STREAMINGVIDEOCONFIG.fields_by_name["feature"].enum_type = _STREAMINGFEATURE _STREAMINGVIDEOCONFIG.fields_by_name[ "shot_change_detection_config" ].message_type = _STREAMINGSHOTCHANGEDETECTIONCONFIG @@ -4622,12 +4668,16 @@ _STREAMINGVIDEOCONFIG.fields_by_name[ "object_tracking_config" ].message_type = _STREAMINGOBJECTTRACKINGCONFIG +_STREAMINGVIDEOCONFIG.fields_by_name[ + "automl_action_recognition_config" +].message_type = _STREAMINGAUTOMLACTIONRECOGNITIONCONFIG _STREAMINGVIDEOCONFIG.fields_by_name[ "automl_classification_config" ].message_type = _STREAMINGAUTOMLCLASSIFICATIONCONFIG _STREAMINGVIDEOCONFIG.fields_by_name[ "automl_object_tracking_config" ].message_type = _STREAMINGAUTOMLOBJECTTRACKINGCONFIG +_STREAMINGVIDEOCONFIG.fields_by_name["feature"].enum_type = _STREAMINGFEATURE _STREAMINGVIDEOCONFIG.fields_by_name[ "storage_config" ].message_type = _STREAMINGSTORAGECONFIG @@ -4655,6 +4705,12 @@ _STREAMINGVIDEOCONFIG.fields_by_name[ "object_tracking_config" ].containing_oneof = _STREAMINGVIDEOCONFIG.oneofs_by_name["streaming_config"] +_STREAMINGVIDEOCONFIG.oneofs_by_name["streaming_config"].fields.append( + _STREAMINGVIDEOCONFIG.fields_by_name["automl_action_recognition_config"] +) +_STREAMINGVIDEOCONFIG.fields_by_name[ + "automl_action_recognition_config" +].containing_oneof = _STREAMINGVIDEOCONFIG.oneofs_by_name["streaming_config"] 
_STREAMINGVIDEOCONFIG.oneofs_by_name["streaming_config"].fields.append( _STREAMINGVIDEOCONFIG.fields_by_name["automl_classification_config"] ) @@ -4667,6 +4723,24 @@ _STREAMINGVIDEOCONFIG.fields_by_name[ "automl_object_tracking_config" ].containing_oneof = _STREAMINGVIDEOCONFIG.oneofs_by_name["streaming_config"] +_STREAMINGANNOTATEVIDEORESPONSE.fields_by_name[ + "error" +].message_type = google_dot_rpc_dot_status__pb2._STATUS +_STREAMINGANNOTATEVIDEORESPONSE.fields_by_name[ + "annotation_results" +].message_type = _STREAMINGVIDEOANNOTATIONRESULTS +_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ + "shot_annotations" +].message_type = _VIDEOSEGMENT +_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ + "label_annotations" +].message_type = _LABELANNOTATION +_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ + "explicit_annotation" +].message_type = _EXPLICITCONTENTANNOTATION +_STREAMINGVIDEOANNOTATIONRESULTS.fields_by_name[ + "object_annotations" +].message_type = _OBJECTTRACKINGANNOTATION DESCRIPTOR.message_types_by_name["AnnotateVideoRequest"] = _ANNOTATEVIDEOREQUEST DESCRIPTOR.message_types_by_name["VideoContext"] = _VIDEOCONTEXT DESCRIPTOR.message_types_by_name["LabelDetectionConfig"] = _LABELDETECTIONCONFIG @@ -4729,36 +4803,39 @@ DESCRIPTOR.message_types_by_name[ "StreamingAnnotateVideoRequest" ] = _STREAMINGANNOTATEVIDEOREQUEST +DESCRIPTOR.message_types_by_name["StreamingVideoConfig"] = _STREAMINGVIDEOCONFIG DESCRIPTOR.message_types_by_name[ "StreamingAnnotateVideoResponse" ] = _STREAMINGANNOTATEVIDEORESPONSE DESCRIPTOR.message_types_by_name[ - "StreamingAutomlClassificationConfig" -] = _STREAMINGAUTOMLCLASSIFICATIONCONFIG -DESCRIPTOR.message_types_by_name[ - "StreamingAutomlObjectTrackingConfig" -] = _STREAMINGAUTOMLOBJECTTRACKINGCONFIG + "StreamingVideoAnnotationResults" +] = _STREAMINGVIDEOANNOTATIONRESULTS DESCRIPTOR.message_types_by_name[ - "StreamingExplicitContentDetectionConfig" -] = _STREAMINGEXPLICITCONTENTDETECTIONCONFIG + 
"StreamingShotChangeDetectionConfig" +] = _STREAMINGSHOTCHANGEDETECTIONCONFIG DESCRIPTOR.message_types_by_name[ "StreamingLabelDetectionConfig" ] = _STREAMINGLABELDETECTIONCONFIG +DESCRIPTOR.message_types_by_name[ + "StreamingExplicitContentDetectionConfig" +] = _STREAMINGEXPLICITCONTENTDETECTIONCONFIG DESCRIPTOR.message_types_by_name[ "StreamingObjectTrackingConfig" ] = _STREAMINGOBJECTTRACKINGCONFIG DESCRIPTOR.message_types_by_name[ - "StreamingShotChangeDetectionConfig" -] = _STREAMINGSHOTCHANGEDETECTIONCONFIG -DESCRIPTOR.message_types_by_name["StreamingStorageConfig"] = _STREAMINGSTORAGECONFIG + "StreamingAutomlActionRecognitionConfig" +] = _STREAMINGAUTOMLACTIONRECOGNITIONCONFIG DESCRIPTOR.message_types_by_name[ - "StreamingVideoAnnotationResults" -] = _STREAMINGVIDEOANNOTATIONRESULTS -DESCRIPTOR.message_types_by_name["StreamingVideoConfig"] = _STREAMINGVIDEOCONFIG -DESCRIPTOR.enum_types_by_name["Feature"] = _FEATURE + "StreamingAutomlClassificationConfig" +] = _STREAMINGAUTOMLCLASSIFICATIONCONFIG +DESCRIPTOR.message_types_by_name[ + "StreamingAutomlObjectTrackingConfig" +] = _STREAMINGAUTOMLOBJECTTRACKINGCONFIG +DESCRIPTOR.message_types_by_name["StreamingStorageConfig"] = _STREAMINGSTORAGECONFIG DESCRIPTOR.enum_types_by_name["LabelDetectionMode"] = _LABELDETECTIONMODE DESCRIPTOR.enum_types_by_name["Likelihood"] = _LIKELIHOOD DESCRIPTOR.enum_types_by_name["StreamingFeature"] = _STREAMINGFEATURE +DESCRIPTOR.enum_types_by_name["Feature"] = _FEATURE _sym_db.RegisterFileDescriptor(DESCRIPTOR) AnnotateVideoRequest = _reflection.GeneratedProtocolMessageType( @@ -4772,21 +4849,21 @@ Attributes: input_uri: - Input video location. Currently, only `Google Cloud Storage - `__ URIs are supported, - which must be specified in the following format: - ``gs://bucket-id/object-id`` (other URI formats return [google - .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) - . For more information, see `Request URIs + Input video location. 
Currently, only `Cloud Storage + `__ URIs are supported. + URIs must be specified in the following format: ``gs://bucket- + id/object-id`` (other URI formats return [google.rpc.Code.INVA + LID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more + information, see `Request URIs `__. - A video URI may include wildcards in ``object-id``, and thus - identify multiple videos. Supported wildcards: ’*’ to match 0 - or more characters; ‘?’ to match 1 character. If unset, the - input video should be embedded in the request as - ``input_content``. If set, ``input_content`` should be unset. + To identify multiple videos, a video URI may include wildcards + in the ``object-id``. Supported wildcards: ’*’ to match 0 or + more characters; ‘?’ to match 1 character. If unset, the input + video should be embedded in the request as ``input_content``. + If set, ``input_content`` must be unset. input_content: The video data bytes. If unset, the input video(s) should be - specified via ``input_uri``. If set, ``input_uri`` should be + specified via the ``input_uri``. If set, ``input_uri`` must be unset. features: Required. Requested video annotation features. @@ -4794,18 +4871,18 @@ Additional video context and/or feature-specific parameters. output_uri: Optional. Location where the output (in JSON format) should be - stored. Currently, only `Google Cloud Storage - `__ URIs are supported, - which must be specified in the following format: + stored. Currently, only `Cloud Storage + `__ URIs are supported. + These must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return [google .rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]) . For more information, see `Request URIs `__. location_id: Optional. Cloud region where annotation should take place. - Supported cloud regions: ``us-east1``, ``us-west1``, ``europe- - west1``, ``asia-east1``. If no region is specified, a region - will be determined based on video file location. 
+ Supported cloud regions are: ``us-east1``, ``us-west1``, + ``europe-west1``, ``asia-east1``. If no region is specified, + the region will be determined based on video file location. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest) }, @@ -4863,7 +4940,7 @@ addition to video-level labels or segment-level labels. If unspecified, defaults to ``SHOT_MODE``. stationary_camera: - Whether the video has been shot from a stationary (i.e. non- + Whether the video has been shot from a stationary (i.e., non- moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with ``SHOT_AND_FRAME_MODE`` enabled. @@ -4874,18 +4951,17 @@ The confidence threshold we perform filtering on the labels from frame-level detection. If not set, it is set to 0.4 by default. The valid range for this threshold is [0.1, 0.9]. Any - value set outside of this range will be clipped. Note: for - best results please follow the default threshold. We will - update the default threshold everytime when we release a new - model. + value set outside of this range will be clipped. Note: For + best results, follow the default threshold. We will update the + default threshold everytime when we release a new model. video_confidence_threshold: The confidence threshold we perform filtering on the labels - from video-level and shot-level detections. If not set, it is + from video-level and shot-level detections. If not set, it’s set to 0.3 by default. The valid range for this threshold is [0.1, 0.9]. Any value set outside of this range will be - clipped. Note: for best results please follow the default - threshold. We will update the default threshold everytime when - we release a new model. + clipped. Note: For best results, follow the default threshold. + We will update the default threshold everytime when we release + a new model. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig) }, @@ -4963,12 +5039,12 @@ Model to use for face detection. Supported values: “builtin/stable” (the default if unset) and “builtin/latest”. include_bounding_boxes: - Whether bounding boxes be included in the face annotation + Whether bounding boxes are included in the face annotation output. include_attributes: Whether to enable face attributes detection, such as glasses, dark_glasses, mouth_open etc. Ignored if - ‘include_bounding_boxes’ is false. + ‘include_bounding_boxes’ is set to false. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig) }, @@ -4986,17 +5062,16 @@ Attributes: include_bounding_boxes: - Whether bounding boxes be included in the person detection + Whether bounding boxes are included in the person detection annotation output. include_pose_landmarks: Whether to enable pose landmarks detection. Ignored if - ‘include_bounding_boxes’ is false. + ‘include_bounding_boxes’ is set to false. include_attributes: Whether to enable person attributes detection, such as cloth color (black, blue, etc), type (coat, dress, etc), pattern - (plain, floral, etc), hair color (black, blonde, etc), hair - length (long, short, bald), etc. Ignored if - ‘include_bounding_boxes’ is false. + (plain, floral, etc), hair, etc. Ignored if + ‘include_bounding_boxes’ is set to false. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig) }, @@ -5107,7 +5182,7 @@ Knowledge Graph Search API `__. description: - Textual description, e.g. ``Fixed-gear bicycle``. + Textual description, e.g., ``Fixed-gear bicycle``. language_code: Language code for ``description`` in BCP-47 format. """, @@ -5129,10 +5204,10 @@ entity: Detected entity. category_entities: - Common categories for the detected entity. E.g. when the label - is ``Terrier`` the category is likely ``dog``. 
And in some - cases there might be more than one categories e.g. ``Terrier`` - could also be a ``pet``. + Common categories for the detected entity. For example, when + the label is ``Terrier``, the category is likely ``dog``. And + in some cases there might be more than one categories e.g., + ``Terrier`` could also be a ``pet``. segments: All video segments where a label was detected. frames: @@ -5273,8 +5348,8 @@ Attributes: name: - The name of the attribute, i.e. glasses, dark_glasses, - mouth_open etc. A full list of supported type names will be + The name of the attribute, for example, glasses, dark_glasses, + mouth_open. A full list of supported type names will be provided in the document. confidence: Detected attribute confidence. Range [0, 1]. @@ -5385,7 +5460,8 @@ Attributes: name: - The name of this landmark, i.e. left_hand, right_shoulder. + The name of this landmark, for example, left_hand, + right_shoulder. point: The 2D point of the detected landmark using the normalized image coordindate system. The normalized coordinates have the @@ -5429,7 +5505,7 @@ Attributes: tracks: - The trackes that a person is detected. + The detected tracks of a person. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation) }, @@ -5447,16 +5523,16 @@ Attributes: input_uri: - Video file location in `Google Cloud Storage + Video file location in `Cloud Storage `__. segment: Video segment on which the annotation is run. segment_label_annotations: - Topical label annotations on video level or user specified + Topical label annotations on video level or user-specified segment level. There is exactly one element for each unique label. segment_presence_label_annotations: - Presence label annotations on video level or user specified + Presence label annotations on video level or user-specified segment level. There is exactly one element for each unique label. 
Compared to the existing topical ``segment_label_annotations``, this field presents more fine- @@ -5541,7 +5617,7 @@ Attributes: input_uri: - Video file location in `Google Cloud Storage + Video file location in `Cloud Storage `__. progress_percent: Approximate percentage processed thus far. Guaranteed to be @@ -5552,10 +5628,10 @@ Time of the most recent update. feature: Specifies which feature is being tracked if the request - contains more than one features. + contains more than one feature. segment: Specifies which segment is being tracked if the request - contains more than one segments. + contains more than one segment. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress) }, @@ -5635,9 +5711,9 @@ result using a speaker_tag provided in the WordInfo. Note: When this is true, we send all the words from the beginning of the audio for the top alternative in every consecutive - responses. This is done in order to improve our speaker tags - as our models learn to identify the speakers in the - conversation over time. + response. This is done in order to improve our speaker tags as + our models learn to identify the speakers in the conversation + over time. diarization_speaker_count: Optional. If set, specifies the estimated number of speakers in the conversation. If not set, defaults to ‘2’. Ignored @@ -5730,8 +5806,8 @@ words: Output only. A list of word-specific information for each recognized word. Note: When ``enable_speaker_diarization`` is - true, you will see all the words from the beginning of the - audio. + set to true, you will see all the words from the beginning of + the audio. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative) }, @@ -5932,17 +6008,6 @@ Attributes: - entity: - Entity to specify the object category that this track is - labeled as. - confidence: - Object category’s labeling confidence of this track. 
- frames: - Information corresponding to all frames where this object - track appears. Non-streaming batch mode: it may be one or - multiple ObjectTrackingFrame messages in frames. Streaming - mode: it can only be one ObjectTrackingFrame message in - frames. track_info: Different representation of tracking info in non-streaming batch and streaming modes. @@ -5956,6 +6021,17 @@ identifiable integer track_id so that the customers can correlate the results of the ongoing ObjectTrackAnnotation of the same track_id over time. + entity: + Entity to specify the object category that this track is + labeled as. + confidence: + Object category’s labeling confidence of this track. + frames: + Information corresponding to all frames where this object + track appears. Non-streaming batch mode: it may be one or + multiple ObjectTrackingFrame messages in frames. Streaming + mode: it can only be one ObjectTrackingFrame message in + frames. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation) }, @@ -6027,6 +6103,43 @@ ) _sym_db.RegisterMessage(StreamingAnnotateVideoRequest) +StreamingVideoConfig = _reflection.GeneratedProtocolMessageType( + "StreamingVideoConfig", + (_message.Message,), + { + "DESCRIPTOR": _STREAMINGVIDEOCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Provides information to the annotator that specifies how to process the + request. + + + Attributes: + streaming_config: + Config for requested annotation feature. + shot_change_detection_config: + Config for STREAMING_SHOT_CHANGE_DETECTION. + label_detection_config: + Config for STREAMING_LABEL_DETECTION. + explicit_content_detection_config: + Config for STREAMING_EXPLICIT_CONTENT_DETECTION. + object_tracking_config: + Config for STREAMING_OBJECT_TRACKING. + automl_action_recognition_config: + Config for STREAMING_AUTOML_ACTION_RECOGNITION. 
+ automl_classification_config: + Config for STREAMING_AUTOML_CLASSIFICATION. + automl_object_tracking_config: + Config for STREAMING_AUTOML_OBJECT_TRACKING. + feature: + Requested annotation feature. + storage_config: + Streaming storage option. By default: storage is disabled. + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig) + }, +) +_sym_db.RegisterMessage(StreamingVideoConfig) + StreamingAnnotateVideoResponse = _reflection.GeneratedProtocolMessageType( "StreamingAnnotateVideoResponse", (_message.Message,), @@ -6046,67 +6159,55 @@ annotation_results: Streaming annotation results. annotation_results_uri: - GCS URI that stores annotation results of one streaming - session. It is a directory that can hold multiple files in - JSON format. Example uri format: - gs://bucket_id/object_id/cloud_project_name-session_id + Google Cloud Storage(GCS) URI that stores annotation results + of one streaming session in JSON format. It is the + annotation_result_storage_directory from the request followed + by ‘/cloud_project_number-session_id’. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse) }, ) _sym_db.RegisterMessage(StreamingAnnotateVideoResponse) -StreamingAutomlClassificationConfig = _reflection.GeneratedProtocolMessageType( - "StreamingAutomlClassificationConfig", - (_message.Message,), - { - "DESCRIPTOR": _STREAMINGAUTOMLCLASSIFICATIONCONFIG, - "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION. - - - Attributes: - model_name: - Resource name of AutoML model. 
Format: ``projects/{project_id} - /locations/{location_id}/models/{model_id}`` - """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig) - }, -) -_sym_db.RegisterMessage(StreamingAutomlClassificationConfig) - -StreamingAutomlObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( - "StreamingAutomlObjectTrackingConfig", +StreamingVideoAnnotationResults = _reflection.GeneratedProtocolMessageType( + "StreamingVideoAnnotationResults", (_message.Message,), { - "DESCRIPTOR": _STREAMINGAUTOMLOBJECTTRACKINGCONFIG, + "DESCRIPTOR": _STREAMINGVIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING. + "__doc__": """Streaming annotation results corresponding to a portion of + the video that is currently being processed. Attributes: - model_name: - Resource name of AutoML model. Format: ``projects/{project_id} - /locations/{location_id}/models/{model_id}`` + shot_annotations: + Shot annotation results. Each shot is represented as a video + segment. + label_annotations: + Label annotation results. + explicit_annotation: + Explicit content annotation results. + object_annotations: + Object tracking results. 
""", - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults) }, ) -_sym_db.RegisterMessage(StreamingAutomlObjectTrackingConfig) +_sym_db.RegisterMessage(StreamingVideoAnnotationResults) -StreamingExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( - "StreamingExplicitContentDetectionConfig", +StreamingShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( + "StreamingShotChangeDetectionConfig", (_message.Message,), { - "DESCRIPTOR": _STREAMINGEXPLICITCONTENTDETECTIONCONFIG, + "DESCRIPTOR": _STREAMINGSHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_EXPLICIT_CONTENT_DETECTION. + "__doc__": """Config for STREAMING_SHOT_CHANGE_DETECTION. """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig) }, ) -_sym_db.RegisterMessage(StreamingExplicitContentDetectionConfig) +_sym_db.RegisterMessage(StreamingShotChangeDetectionConfig) StreamingLabelDetectionConfig = _reflection.GeneratedProtocolMessageType( "StreamingLabelDetectionConfig", @@ -6128,6 +6229,20 @@ ) _sym_db.RegisterMessage(StreamingLabelDetectionConfig) +StreamingExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( + "StreamingExplicitContentDetectionConfig", + (_message.Message,), + { + "DESCRIPTOR": _STREAMINGEXPLICITCONTENTDETECTIONCONFIG, + "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + "__doc__": """Config for STREAMING_EXPLICIT_CONTENT_DETECTION. 
+ + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig) + }, +) +_sym_db.RegisterMessage(StreamingExplicitContentDetectionConfig) + StreamingObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( "StreamingObjectTrackingConfig", (_message.Message,), @@ -6142,111 +6257,94 @@ ) _sym_db.RegisterMessage(StreamingObjectTrackingConfig) -StreamingShotChangeDetectionConfig = _reflection.GeneratedProtocolMessageType( - "StreamingShotChangeDetectionConfig", +StreamingAutomlActionRecognitionConfig = _reflection.GeneratedProtocolMessageType( + "StreamingAutomlActionRecognitionConfig", (_message.Message,), { - "DESCRIPTOR": _STREAMINGSHOTCHANGEDETECTIONCONFIG, + "DESCRIPTOR": _STREAMINGAUTOMLACTIONRECOGNITIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_SHOT_CHANGE_DETECTION. + "__doc__": """Config for STREAMING_AUTOML_ACTION_RECOGNITION. + + Attributes: + model_name: + Resource name of AutoML model. 
Format: ``projects/{project_id} + /locations/{location_id}/models/{model_id}`` """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig) }, ) -_sym_db.RegisterMessage(StreamingShotChangeDetectionConfig) +_sym_db.RegisterMessage(StreamingAutomlActionRecognitionConfig) -StreamingStorageConfig = _reflection.GeneratedProtocolMessageType( - "StreamingStorageConfig", +StreamingAutomlClassificationConfig = _reflection.GeneratedProtocolMessageType( + "StreamingAutomlClassificationConfig", (_message.Message,), { - "DESCRIPTOR": _STREAMINGSTORAGECONFIG, + "DESCRIPTOR": _STREAMINGAUTOMLCLASSIFICATIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for streaming storage option. + "__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION. Attributes: - enable_storage_annotation_result: - Enable streaming storage. Default: false. - annotation_result_storage_directory: - GCS URI to store all annotation results for one client. Client - should specify this field as the top-level storage directory. - Annotation results of different sessions will be put into - different sub-directories denoted by project_name and - session_id. All sub-directories will be auto generated by - program and will be made accessible to client in response - proto. URIs must be specified in the following format: - ``gs://bucket-id/object-id`` ``bucket-id`` should be a valid - GCS bucket created by client and bucket permission shall also - be configured properly. ``object-id`` can be arbitrary string - that make sense to client. Other URI formats will return error - and cause GCS write failure. + model_name: + Resource name of AutoML model. 
Format: ``projects/{project_num + ber}/locations/{location_id}/models/{model_id}`` """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig) }, ) -_sym_db.RegisterMessage(StreamingStorageConfig) +_sym_db.RegisterMessage(StreamingAutomlClassificationConfig) -StreamingVideoAnnotationResults = _reflection.GeneratedProtocolMessageType( - "StreamingVideoAnnotationResults", +StreamingAutomlObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( + "StreamingAutomlObjectTrackingConfig", (_message.Message,), { - "DESCRIPTOR": _STREAMINGVIDEOANNOTATIONRESULTS, + "DESCRIPTOR": _STREAMINGAUTOMLOBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Streaming annotation results corresponding to a portion of - the video that is currently being processed. + "__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING. Attributes: - shot_annotations: - Shot annotation results. Each shot is represented as a video - segment. - label_annotations: - Label annotation results. - explicit_annotation: - Explicit content annotation results. - object_annotations: - Object tracking results. + model_name: + Resource name of AutoML model. 
Format: ``projects/{project_id} + /locations/{location_id}/models/{model_id}`` """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig) }, ) -_sym_db.RegisterMessage(StreamingVideoAnnotationResults) +_sym_db.RegisterMessage(StreamingAutomlObjectTrackingConfig) -StreamingVideoConfig = _reflection.GeneratedProtocolMessageType( - "StreamingVideoConfig", +StreamingStorageConfig = _reflection.GeneratedProtocolMessageType( + "StreamingStorageConfig", (_message.Message,), { - "DESCRIPTOR": _STREAMINGVIDEOCONFIG, + "DESCRIPTOR": _STREAMINGSTORAGECONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Provides information to the annotator that specifies how - to process the request. + "__doc__": """Config for streaming storage option. Attributes: - feature: - Requested annotation feature. - streaming_config: - Config for requested annotation feature. - shot_change_detection_config: - Config for STREAMING_SHOT_CHANGE_DETECTION. - label_detection_config: - Config for STREAMING_LABEL_DETECTION. - explicit_content_detection_config: - Config for STREAMING_EXPLICIT_CONTENT_DETECTION. - object_tracking_config: - Config for STREAMING_OBJECT_TRACKING. - automl_classification_config: - Config for STREAMING_AUTOML_CLASSIFICATION. - automl_object_tracking_config: - Config for STREAMING_AUTOML_OBJECT_TRACKING. - storage_config: - Streaming storage option. By default: storage is disabled. + enable_storage_annotation_result: + Enable streaming storage. Default: false. + annotation_result_storage_directory: + Cloud Storage URI to store all annotation results for one + client. Client should specify this field as the top-level + storage directory. 
Annotation results of different sessions + will be put into different sub-directories denoted by + project_name and session_id. All sub-directories will be auto + generated by program and will be made accessible to client in + response proto. URIs must be specified in the following + format: ``gs://bucket-id/object-id`` ``bucket-id`` should be a + valid Cloud Storage bucket created by client and bucket + permission shall also be configured properly. ``object-id`` + can be arbitrary string that make sense to client. Other URI + formats will return error and cause Cloud Storage write + failure. """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig) }, ) -_sym_db.RegisterMessage(StreamingVideoConfig) +_sym_db.RegisterMessage(StreamingStorageConfig) DESCRIPTOR._options = None @@ -6281,8 +6379,8 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - serialized_start=11839, - serialized_end=12173, + serialized_start=12068, + serialized_end=12402, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", @@ -6306,8 +6404,8 @@ file=DESCRIPTOR, index=1, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - serialized_start=12176, - serialized_end=12477, + serialized_start=12405, + serialized_end=12706, methods=[ _descriptor.MethodDescriptor( name="StreamingAnnotateVideo", diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py index 4e25eb64..933b586c 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py @@ -10,7 +10,7 @@ class 
VideoIntelligenceServiceStub(object): - """Service that implements Google Cloud Video Intelligence API. + """Service that implements the Video Intelligence API. """ def __init__(self, channel): @@ -27,7 +27,7 @@ def __init__(self, channel): class VideoIntelligenceServiceServicer(object): - """Service that implements Google Cloud Video Intelligence API. + """Service that implements the Video Intelligence API. """ def AnnotateVideo(self, request, context): @@ -57,7 +57,7 @@ def add_VideoIntelligenceServiceServicer_to_server(servicer, server): class StreamingVideoIntelligenceServiceStub(object): - """Service that implements streaming Google Cloud Video Intelligence API. + """Service that implements streaming Video Intelligence API. """ def __init__(self, channel): @@ -74,7 +74,7 @@ def __init__(self, channel): class StreamingVideoIntelligenceServiceServicer(object): - """Service that implements streaming Google Cloud Video Intelligence API. + """Service that implements streaming Video Intelligence API. 
""" def StreamingAnnotateVideo(self, request_iterator, context): diff --git a/synth.metadata b/synth.metadata index 81c88a64..2822004b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "5a90d467aa65e7f038f87585e8fbb45d74475e7c", - "internalRef": "312088359" + "sha": "d1a9f02fd4fb263bae0383b4a5af0bbef33753d6", + "internalRef": "312101156" } }, { From 611b7e475c2fa725b18a495016e39659015b20bd Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:05:42 -0700 Subject: [PATCH 07/17] fix: point artman at gapic v1 for monitoring/v3 API Committer: @miraleung PiperOrigin-RevId: 312104480 Source-Author: Google APIs Source-Date: Mon May 18 10:19:39 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: e94ad376529fabdfd2fce00407926a0cefd38b9e Source-Link: https://github.com/googleapis/googleapis/commit/e94ad376529fabdfd2fce00407926a0cefd38b9e --- .../proto/video_intelligence_pb2.py | 10 +++++----- synth.metadata | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index 76eeb5ab..ea092a46 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -5937,9 +5937,9 @@ { "DESCRIPTOR": _TEXTFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotation results for text annotation - (OCR). Contains information regarding timestamp and bounding box - locations for the frames containing detected OCR text snippets. + "__doc__": """Video frame level annotation results for text annotation (OCR). 
Contains + information regarding timestamp and bounding box locations for the + frames containing detected OCR text snippets. Attributes: @@ -6109,8 +6109,8 @@ { "DESCRIPTOR": _STREAMINGVIDEOCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Provides information to the annotator that specifies how to process the - request. + "__doc__": """Provides information to the annotator that specifies how + to process the request. Attributes: diff --git a/synth.metadata b/synth.metadata index 2822004b..3a8eeb15 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "d1a9f02fd4fb263bae0383b4a5af0bbef33753d6", - "internalRef": "312101156" + "sha": "e94ad376529fabdfd2fce00407926a0cefd38b9e", + "internalRef": "312104480" } }, { From 170caf7b58ba32411222447c9c6523b57a6ad5b1 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:06:27 -0700 Subject: [PATCH 08/17] Dialogflow weekly v2 library update: - Minor comment updates. 
PiperOrigin-RevId: 312123588 Source-Author: Google APIs Source-Date: Mon May 18 11:49:18 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: cd3ce2651c3921670217e664303976cdf76e9fe2 Source-Link: https://github.com/googleapis/googleapis/commit/cd3ce2651c3921670217e664303976cdf76e9fe2 --- .../proto/video_intelligence_pb2.py | 10 +++++----- synth.metadata | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index ea092a46..76eeb5ab 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -5937,9 +5937,9 @@ { "DESCRIPTOR": _TEXTFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotation results for text annotation (OCR). Contains - information regarding timestamp and bounding box locations for the - frames containing detected OCR text snippets. + "__doc__": """Video frame level annotation results for text annotation + (OCR). Contains information regarding timestamp and bounding box + locations for the frames containing detected OCR text snippets. Attributes: @@ -6109,8 +6109,8 @@ { "DESCRIPTOR": _STREAMINGVIDEOCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Provides information to the annotator that specifies how - to process the request. + "__doc__": """Provides information to the annotator that specifies how to process the + request. 
Attributes: diff --git a/synth.metadata b/synth.metadata index 3a8eeb15..d35d965f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e94ad376529fabdfd2fce00407926a0cefd38b9e", - "internalRef": "312104480" + "sha": "cd3ce2651c3921670217e664303976cdf76e9fe2", + "internalRef": "312123588" } }, { From 3d792b8ca3a87b0dc3f37ca1976aa3fbef0f763e Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:06:42 -0700 Subject: [PATCH 09/17] chore(python): remove one remaining extra bash line (#565) To avoid generating bogus PRs Co-authored-by: Jeffrey Rennie Source-Author: Carlos de la Guardia Source-Date: Tue May 19 11:57:19 2020 -0500 Source-Repo: googleapis/synthtool Source-Sha: d2364eb80b840a36136c8ce12f1c6efabcc9600e Source-Link: https://github.com/googleapis/synthtool/commit/d2364eb80b840a36136c8ce12f1c6efabcc9600e --- .kokoro/release.sh | 2 -- synth.metadata | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.kokoro/release.sh b/.kokoro/release.sh index 686a595f..bc4c0a2c 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Start the releasetool reporter diff --git a/synth.metadata b/synth.metadata index d35d965f..f370f40e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,7 +19,7 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "7136daa5687f2a5bdcbba2cb25457f56fdf85d59" + "sha": "d2364eb80b840a36136c8ce12f1c6efabcc9600e" } } ], From c78b9f148035b1fd2c32a6329e190e6bc1c60620 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:07:26 -0700 Subject: [PATCH 10/17] Upgrade protoc-docs-plugin version to 0.7.0 to fix unstable docstrings. 
PiperOrigin-RevId: 312689208 Source-Author: Google APIs Source-Date: Thu May 21 10:00:47 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: dec3204175104cef49bf21d685d5517caaf0058f Source-Link: https://github.com/googleapis/googleapis/commit/dec3204175104cef49bf21d685d5517caaf0058f --- .../proto/video_intelligence_pb2.py | 164 +++---------- .../proto/video_intelligence_pb2.py | 63 +---- .../proto/video_intelligence_pb2.py | 72 ++---- .../proto/video_intelligence_pb2.py | 108 ++------- .../proto/video_intelligence_pb2.py | 224 ++++-------------- synth.metadata | 6 +- 6 files changed, 143 insertions(+), 494 deletions(-) diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index f5d356d3..f2b32782 100644 --- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -3601,8 +3601,6 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. - - Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -3652,8 +3650,6 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. - - Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -3686,8 +3682,6 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. - - Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -3730,8 +3724,6 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. 
- - Attributes: model: Model to use for shot change detection. Supported values: @@ -3749,8 +3741,6 @@ "DESCRIPTOR": _OBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for OBJECT_TRACKING. - - Attributes: model: Model to use for object tracking. Supported values: @@ -3768,8 +3758,6 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. - - Attributes: model: Model to use for face detection. Supported values: @@ -3790,8 +3778,6 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. - - Attributes: model: Model to use for explicit content detection. Supported values: @@ -3809,8 +3795,6 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. - - Attributes: language_hints: Language hint can be specified if the language to be detected @@ -3834,8 +3818,6 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment. - - Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -3855,10 +3837,7 @@ { "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Video segment level annotation results for label - detection. - - + "__doc__": """Video segment level annotation results for label detection. Attributes: segment: Video segment where a label was detected. @@ -3877,8 +3856,6 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. 
- - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -3898,8 +3875,6 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. - - Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -3922,8 +3897,6 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Label annotation. - - Attributes: entity: Detected entity. @@ -3949,8 +3922,6 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -3969,11 +3940,9 @@ { "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Explicit content annotation (based on per-frame visual - signals only). If no explicit content has been detected in a frame, no - annotations are present for that frame. - - + "__doc__": """Explicit content annotation (based on per-frame visual signals only). + If no explicit content has been detected in a frame, no annotations + are present for that frame. Attributes: frames: All video frames where explicit content was detected. @@ -3989,10 +3958,8 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding box. The normalized vertex coordinates - are relative to the original image. Range: [0, 1]. - - + "__doc__": """Normalized bounding box. The normalized vertex coordinates are + relative to the original image. Range: [0, 1]. Attributes: left: Left X coordinate. 
@@ -4015,8 +3982,6 @@ "DESCRIPTOR": _FACESEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for face detection. - - Attributes: segment: Video segment where a face was detected. @@ -4033,8 +3998,6 @@ "DESCRIPTOR": _FACEFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for face detection. - - Attributes: normalized_bounding_boxes: Normalized Bounding boxes in a frame. There can be more than @@ -4056,8 +4019,6 @@ "DESCRIPTOR": _FACEANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Face annotation. - - Attributes: thumbnail: Thumbnail of a representative face view (in JPEG format). @@ -4077,10 +4038,8 @@ { "DESCRIPTOR": _TIMESTAMPEDOBJECT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """For tracking related features. An object at time_offset - with attributes, and located with normalized_bounding_box. - - + "__doc__": """For tracking related features. An object at time_offset with + attributes, and located with normalized_bounding_box. Attributes: normalized_bounding_box: Normalized Bounding box in a frame, where the object is @@ -4105,8 +4064,6 @@ "DESCRIPTOR": _TRACK, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """A track of an object instance. - - Attributes: segment: Video segment of a track. @@ -4129,10 +4086,7 @@ { "DESCRIPTOR": _DETECTEDATTRIBUTE, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """A generic detected attribute represented by name in string - format. - - + "__doc__": """A generic detected attribute represented by name in string format. Attributes: name: The name of the attribute, i.e. 
glasses, dark_glasses, @@ -4155,10 +4109,8 @@ { "DESCRIPTOR": _DETECTEDLANDMARK, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """A generic detected landmark represented by name in string - format and a 2D location. - - + "__doc__": """A generic detected landmark represented by name in string format and a + 2D location. Attributes: name: The name of this landmark, i.e. left_hand, right_shoulder. @@ -4181,8 +4133,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. - - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -4250,11 +4200,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Video annotation response. Included in the ``response`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation response. Included in the ``response`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_results: Annotation results for all videos specified in @@ -4272,8 +4220,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. - - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -4303,11 +4249,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Video annotation progress. Included in the ``metadata`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation progress. 
Included in the ``metadata`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_progress: Progress metadata for all videos specified in @@ -4325,8 +4269,6 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. - - Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -4394,10 +4336,8 @@ { "DESCRIPTOR": _SPEECHCONTEXT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Provides “hints” to the speech recognizer to favor - specific words and phrases in the results. - - + "__doc__": """Provides “hints” to the speech recognizer to favor specific words and + phrases in the results. Attributes: phrases: Optional. A list of strings containing words and phrases @@ -4420,10 +4360,7 @@ { "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """A speech recognition result corresponding to a portion of - the audio. - - + "__doc__": """A speech recognition result corresponding to a portion of the audio. Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -4449,8 +4386,6 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). - - Attributes: transcript: Transcript text representing the words that the user spoke. @@ -4479,11 +4414,9 @@ { "DESCRIPTOR": _WORDINFO, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Word-specific information for recognized words. Word - information is only included in the response when certain request - parameters are set, such as ``enable_word_time_offsets``. 
- - + "__doc__": """Word-specific information for recognized words. Word information is + only included in the response when certain request parameters are set, + such as ``enable_word_time_offsets``. Attributes: start_time: Time offset relative to the beginning of the audio, and @@ -4526,8 +4459,6 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """X coordinate. - - Attributes: y: Y coordinate. @@ -4543,20 +4474,14 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding polygon for text (that might not be - aligned with axis). Contains list of the corner points in clockwise - order starting from top-left corner. For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0—-1 \| \| - 3—-2 - - When it’s clockwise rotated 180 degrees around the top-left corner it - becomes: 2—-3 \| \| 1—-0 - - and the vertex order will still be (0, 1, 2, 3). Note that values can be - less than 0, or greater than 1 due to trignometric calculations for - location of the box. - - + "__doc__": """Normalized bounding polygon for text (that might not be aligned with + axis). Contains list of the corner points in clockwise order starting + from top-left corner. For example, for a rectangular bounding box: + When the text is horizontal it might look like: 0—-1 \| \| 3—-2 When + it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, + 3). Note that values can be less than 0, or greater than 1 due to + trignometric calculations for location of the box. Attributes: vertices: Normalized vertices of the bounding polygon. 
@@ -4573,8 +4498,6 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. - - Attributes: segment: Video segment where a text snippet was detected. @@ -4596,11 +4519,9 @@ { "DESCRIPTOR": _TEXTFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotation results for text annotation - (OCR). Contains information regarding timestamp and bounding box - locations for the frames containing detected OCR text snippets. - - + "__doc__": """Video frame level annotation results for text annotation (OCR). + Contains information regarding timestamp and bounding box locations + for the frames containing detected OCR text snippets. Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. @@ -4618,11 +4539,9 @@ { "DESCRIPTOR": _TEXTANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Annotations related to one detected OCR text snippet. This - will contain the corresponding text, confidence value, and frame level + "__doc__": """Annotations related to one detected OCR text snippet. This will + contain the corresponding text, confidence value, and frame level information for each detection. - - Attributes: text: The detected text. @@ -4640,11 +4559,8 @@ { "DESCRIPTOR": _OBJECTTRACKINGFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotations for object detection and - tracking. This field stores per frame location, time offset, and - confidence. - - + "__doc__": """Video frame level annotations for object detection and tracking. This + field stores per frame location, time offset, and confidence. 
Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -4664,8 +4580,6 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. - - Attributes: track_info: Different representation of tracking info in non-streaming @@ -4703,10 +4617,8 @@ { "DESCRIPTOR": _LOGORECOGNITIONANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - "__doc__": """Annotation corresponding to one detected, tracked and - recognized logo class. - - + "__doc__": """Annotation corresponding to one detected, tracked and recognized logo + class. Attributes: entity: Entity category information to specify the logo class that all diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index c1df7128..8e4b6615 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -1776,8 +1776,6 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video annotation request. - - Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -1827,8 +1825,6 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. - - Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -1855,8 +1851,6 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. 
- - Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -1883,8 +1877,6 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. - - Attributes: model: Model to use for shot change detection. Supported values: @@ -1902,8 +1894,6 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. - - Attributes: model: Model to use for explicit content detection. Supported values: @@ -1921,8 +1911,6 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. - - Attributes: model: Model to use for face detection. Supported values: @@ -1943,8 +1931,6 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video segment. - - Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -1964,10 +1950,7 @@ { "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": """Video segment level annotation results for label - detection. - - + "__doc__": """Video segment level annotation results for label detection. Attributes: segment: Video segment where a label was detected. @@ -1986,8 +1969,6 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. 
- - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2007,8 +1988,6 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. - - Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -2031,8 +2010,6 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Label annotation. - - Attributes: entity: Detected entity. @@ -2058,8 +2035,6 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2078,11 +2053,9 @@ { "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": """Explicit content annotation (based on per-frame visual - signals only). If no explicit content has been detected in a frame, no - annotations are present for that frame. - - + "__doc__": """Explicit content annotation (based on per-frame visual signals only). + If no explicit content has been detected in a frame, no annotations + are present for that frame. Attributes: frames: All video frames where explicit content was detected. @@ -2098,10 +2071,8 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding box. The normalized vertex coordinates - are relative to the original image. Range: [0, 1]. - - + "__doc__": """Normalized bounding box. The normalized vertex coordinates are + relative to the original image. Range: [0, 1]. Attributes: left: Left X coordinate. 
@@ -2124,8 +2095,6 @@ "DESCRIPTOR": _FACESEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for face detection. - - Attributes: segment: Video segment where a face was detected. @@ -2142,8 +2111,6 @@ "DESCRIPTOR": _FACEFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for face detection. - - Attributes: normalized_bounding_boxes: Normalized Bounding boxes in a frame. There can be more than @@ -2165,8 +2132,6 @@ "DESCRIPTOR": _FACEANNOTATION, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Face annotation. - - Attributes: thumbnail: Thumbnail of a representative face view (in JPEG format). @@ -2187,8 +2152,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. - - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2225,11 +2188,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": """Video annotation response. Included in the ``response`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation response. Included in the ``response`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_results: Annotation results for all videos specified in @@ -2247,8 +2208,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. 
- - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2272,11 +2231,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - "__doc__": """Video annotation progress. Included in the ``metadata`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_progress: Progress metadata for all videos specified in diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py index 688dfc9c..c1cfdf44 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py @@ -1824,8 +1824,6 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. - - Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -1875,8 +1873,6 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. - - Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -1903,8 +1899,6 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. 
- - Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -1931,8 +1925,6 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. - - Attributes: model: Model to use for shot change detection. Supported values: @@ -1950,8 +1942,6 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. - - Attributes: model: Model to use for explicit content detection. Supported values: @@ -1969,8 +1959,6 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. - - Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -1990,10 +1978,7 @@ { "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Video segment level annotation results for label - detection. - - + "__doc__": """Video segment level annotation results for label detection. Attributes: segment: Video segment where a label was detected. @@ -2012,8 +1997,6 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2033,8 +2016,6 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. - - Attributes: entity_id: Opaque entity ID. 
Some IDs may be available in `Google @@ -2057,8 +2038,6 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. - - Attributes: entity: Detected entity. @@ -2084,8 +2063,6 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2104,11 +2081,9 @@ { "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Explicit content annotation (based on per-frame visual - signals only). If no explicit content has been detected in a frame, no - annotations are present for that frame. - - + "__doc__": """Explicit content annotation (based on per-frame visual signals only). + If no explicit content has been detected in a frame, no annotations + are present for that frame. Attributes: frames: All video frames where explicit content was detected. @@ -2125,8 +2100,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. - - Attributes: input_uri: Output only. Video file location in `Google Cloud Storage @@ -2162,11 +2135,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation response. Included in the ``response`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation response. Included in the ``response`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. 
- - Attributes: annotation_results: Annotation results for all videos specified in @@ -2184,8 +2155,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. - - Attributes: input_uri: Output only. Video file location in `Google Cloud Storage @@ -2209,11 +2178,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation progress. Included in the ``metadata`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_progress: Progress metadata for all videos specified in @@ -2231,8 +2198,6 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. - - Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -2282,10 +2247,8 @@ { "DESCRIPTOR": _SPEECHCONTEXT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Provides “hints” to the speech recognizer to favor - specific words and phrases in the results. - - + "__doc__": """Provides “hints” to the speech recognizer to favor specific words and + phrases in the results. Attributes: phrases: Optional. A list of strings containing words and phrases @@ -2308,10 +2271,7 @@ { "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """A speech recognition result corresponding to a portion of - the audio. 
- - + "__doc__": """A speech recognition result corresponding to a portion of the audio. Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -2332,8 +2292,6 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). - - Attributes: transcript: Output only. Transcript text representing the words that the @@ -2361,11 +2319,9 @@ { "DESCRIPTOR": _WORDINFO, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - "__doc__": """Word-specific information for recognized words. Word - information is only included in the response when certain request - parameters are set, such as ``enable_word_time_offsets``. - - + "__doc__": """Word-specific information for recognized words. Word information is + only included in the response when certain request parameters are set, + such as ``enable_word_time_offsets``. Attributes: start_time: Output only. Time offset relative to the beginning of the diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index 1b8de3fb..66291643 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -2063,8 +2063,6 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. - - Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -2114,8 +2112,6 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. - - Attributes: segments: Video segments to annotate. 
The segments may overlap and are @@ -2142,8 +2138,6 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. - - Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -2170,8 +2164,6 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. - - Attributes: model: Model to use for shot change detection. Supported values: @@ -2189,8 +2181,6 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. - - Attributes: model: Model to use for explicit content detection. Supported values: @@ -2208,8 +2198,6 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. - - Attributes: language_hints: Language hint can be specified if the language to be detected @@ -2230,8 +2218,6 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. - - Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -2251,10 +2237,7 @@ { "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Video segment level annotation results for label - detection. - - + "__doc__": """Video segment level annotation results for label detection. Attributes: segment: Video segment where a label was detected. @@ -2273,8 +2256,6 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. 
- - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2294,8 +2275,6 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. - - Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -2318,8 +2297,6 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. - - Attributes: entity: Detected entity. @@ -2345,8 +2322,6 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2365,11 +2340,9 @@ { "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Explicit content annotation (based on per-frame visual - signals only). If no explicit content has been detected in a frame, no - annotations are present for that frame. - - + "__doc__": """Explicit content annotation (based on per-frame visual signals only). + If no explicit content has been detected in a frame, no annotations + are present for that frame. Attributes: frames: All video frames where explicit content was detected. @@ -2385,10 +2358,8 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding box. The normalized vertex coordinates - are relative to the original image. Range: [0, 1]. - - + "__doc__": """Normalized bounding box. The normalized vertex coordinates are + relative to the original image. Range: [0, 1]. Attributes: left: Left X coordinate. 
@@ -2411,8 +2382,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. - - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2452,11 +2421,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation response. Included in the ``response`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation response. Included in the ``response`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_results: Annotation results for all videos specified in @@ -2474,8 +2441,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. - - Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2499,11 +2464,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation progress. Included in the ``metadata`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_progress: Progress metadata for all videos specified in @@ -2521,8 +2484,6 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """X coordinate. - - Attributes: y: Y coordinate. 
@@ -2538,20 +2499,14 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding polygon for text (that might not be - aligned with axis). Contains list of the corner points in clockwise - order starting from top-left corner. For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0—-1 \| \| - 3—-2 - - When it’s clockwise rotated 180 degrees around the top-left corner it - becomes: 2—-3 \| \| 1—-0 - - and the vertex order will still be (0, 1, 2, 3). Note that values can be - less than 0, or greater than 1 due to trignometric calculations for - location of the box. - - + "__doc__": """Normalized bounding polygon for text (that might not be aligned with + axis). Contains list of the corner points in clockwise order starting + from top-left corner. For example, for a rectangular bounding box: + When the text is horizontal it might look like: 0—-1 \| \| 3—-2 When + it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, + 3). Note that values can be less than 0, or greater than 1 due to + trignometric calculations for location of the box. Attributes: vertices: Normalized vertices of the bounding polygon. @@ -2568,8 +2523,6 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. - - Attributes: segment: Video segment where a text snippet was detected. @@ -2591,11 +2544,9 @@ { "DESCRIPTOR": _TEXTFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotation results for text annotation - (OCR). Contains information regarding timestamp and bounding box - locations for the frames containing detected OCR text snippets. 
- - + "__doc__": """Video frame level annotation results for text annotation (OCR). + Contains information regarding timestamp and bounding box locations + for the frames containing detected OCR text snippets. Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. @@ -2613,11 +2564,9 @@ { "DESCRIPTOR": _TEXTANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Annotations related to one detected OCR text snippet. This will contain - the corresponding text, confidence value, and frame level information - for each detection. - - + "__doc__": """Annotations related to one detected OCR text snippet. This will + contain the corresponding text, confidence value, and frame level + information for each detection. Attributes: text: The detected text. @@ -2635,11 +2584,8 @@ { "DESCRIPTOR": _OBJECTTRACKINGFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotations for object detection and - tracking. This field stores per frame location, time offset, and - confidence. - - + "__doc__": """Video frame level annotations for object detection and tracking. This + field stores per frame location, time offset, and confidence. Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -2659,8 +2605,6 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. 
- - Attributes: entity: Entity to specify the object category that this track is diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index 76eeb5ab..6ead460f 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -4845,8 +4845,6 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. - - Attributes: input_uri: Input video location. Currently, only `Cloud Storage @@ -4896,8 +4894,6 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. - - Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -4932,8 +4928,6 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. - - Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -4975,8 +4969,6 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. - - Attributes: model: Model to use for shot change detection. Supported values: @@ -4994,8 +4986,6 @@ "DESCRIPTOR": _OBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for OBJECT_TRACKING. - - Attributes: model: Model to use for object tracking. 
Supported values: @@ -5013,8 +5003,6 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. - - Attributes: model: Model to use for explicit content detection. Supported values: @@ -5032,8 +5020,6 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. - - Attributes: model: Model to use for face detection. Supported values: @@ -5058,8 +5044,6 @@ "DESCRIPTOR": _PERSONDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for PERSON_DETECTION. - - Attributes: include_bounding_boxes: Whether bounding boxes are included in the person detection @@ -5085,8 +5069,6 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. - - Attributes: language_hints: Language hint can be specified if the language to be detected @@ -5110,8 +5092,6 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. - - Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -5131,10 +5111,7 @@ { "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video segment level annotation results for label - detection. - - + "__doc__": """Video segment level annotation results for label detection. Attributes: segment: Video segment where a label was detected. @@ -5153,8 +5130,6 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. 
- - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -5174,8 +5149,6 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. - - Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -5198,8 +5171,6 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. - - Attributes: entity: Detected entity. @@ -5225,8 +5196,6 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. - - Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -5245,11 +5214,9 @@ { "DESCRIPTOR": _EXPLICITCONTENTANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Explicit content annotation (based on per-frame visual - signals only). If no explicit content has been detected in a frame, no - annotations are present for that frame. - - + "__doc__": """Explicit content annotation (based on per-frame visual signals only). + If no explicit content has been detected in a frame, no annotations + are present for that frame. Attributes: frames: All video frames where explicit content was detected. @@ -5265,10 +5232,8 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGBOX, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding box. The normalized vertex coordinates - are relative to the original image. Range: [0, 1]. - - + "__doc__": """Normalized bounding box. The normalized vertex coordinates are + relative to the original image. Range: [0, 1]. Attributes: left: Left X coordinate. 
@@ -5290,10 +5255,8 @@ { "DESCRIPTOR": _TIMESTAMPEDOBJECT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """For tracking related features. An object at time_offset - with attributes, and located with normalized_bounding_box. - - + "__doc__": """For tracking related features. An object at time_offset with + attributes, and located with normalized_bounding_box. Attributes: normalized_bounding_box: Normalized Bounding box in a frame, where the object is @@ -5318,8 +5281,6 @@ "DESCRIPTOR": _TRACK, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """A track of an object instance. - - Attributes: segment: Video segment of a track. @@ -5342,10 +5303,7 @@ { "DESCRIPTOR": _DETECTEDATTRIBUTE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """A generic detected attribute represented by name in string - format. - - + "__doc__": """A generic detected attribute represented by name in string format. Attributes: name: The name of the attribute, for example, glasses, dark_glasses, @@ -5369,8 +5327,6 @@ "DESCRIPTOR": _CELEBRITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Celebrity definition. - - Attributes: name: The resource name of the celebrity. Have the format ``video- @@ -5399,8 +5355,6 @@ "DESCRIPTOR": _CELEBRITYTRACK_RECOGNIZEDCELEBRITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """The recognized celebrity with confidence score. - - Attributes: celebrity: The recognized celebrity. @@ -5412,11 +5366,9 @@ ), "DESCRIPTOR": _CELEBRITYTRACK, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """The annotation result of a celebrity face track. - RecognizedCelebrity field could be empty if the face track does not have - any matched celebrities. 
- - + "__doc__": """The annotation result of a celebrity face track. RecognizedCelebrity + field could be empty if the face track does not have any matched + celebrities. Attributes: celebrities: Top N match of the celebrities for the face in this track. @@ -5436,8 +5388,6 @@ "DESCRIPTOR": _CELEBRITYRECOGNITIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Celebrity recognition annotation per video. - - Attributes: celebrity_tracks: The tracks detected from the input video, including recognized @@ -5454,10 +5404,8 @@ { "DESCRIPTOR": _DETECTEDLANDMARK, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """A generic detected landmark represented by name in string - format and a 2D location. - - + "__doc__": """A generic detected landmark represented by name in string format and a + 2D location. Attributes: name: The name of this landmark, for example, left_hand, @@ -5481,8 +5429,6 @@ "DESCRIPTOR": _FACEDETECTIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Face detection annotation. - - Attributes: tracks: The face tracks with attributes. @@ -5501,8 +5447,6 @@ "DESCRIPTOR": _PERSONDETECTIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Person detection annotation per video. - - Attributes: tracks: The detected tracks of a person. @@ -5519,8 +5463,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. - - Attributes: input_uri: Video file location in `Cloud Storage @@ -5591,11 +5533,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation response. 
Included in the ``response`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation response. Included in the ``response`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_results: Annotation results for all videos specified in @@ -5613,8 +5553,6 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. - - Attributes: input_uri: Video file location in `Cloud Storage @@ -5644,11 +5582,9 @@ { "DESCRIPTOR": _ANNOTATEVIDEOPROGRESS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video annotation progress. Included in the ``metadata`` - field of the ``Operation`` returned by the ``GetOperation`` call of the + "__doc__": """Video annotation progress. Included in the ``metadata`` field of the + ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. - - Attributes: annotation_progress: Progress metadata for all videos specified in @@ -5666,8 +5602,6 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. - - Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -5735,10 +5669,8 @@ { "DESCRIPTOR": _SPEECHCONTEXT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Provides “hints” to the speech recognizer to favor - specific words and phrases in the results. - - + "__doc__": """Provides “hints” to the speech recognizer to favor specific words and + phrases in the results. Attributes: phrases: Optional. 
A list of strings containing words and phrases @@ -5761,10 +5693,7 @@ { "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """A speech recognition result corresponding to a portion of - the audio. - - + "__doc__": """A speech recognition result corresponding to a portion of the audio. Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -5790,8 +5719,6 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). - - Attributes: transcript: Transcript text representing the words that the user spoke. @@ -5820,11 +5747,9 @@ { "DESCRIPTOR": _WORDINFO, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Word-specific information for recognized words. Word - information is only included in the response when certain request - parameters are set, such as ``enable_word_time_offsets``. - - + "__doc__": """Word-specific information for recognized words. Word information is + only included in the response when certain request parameters are set, + such as ``enable_word_time_offsets``. Attributes: start_time: Time offset relative to the beginning of the audio, and @@ -5867,8 +5792,6 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """X coordinate. - - Attributes: y: Y coordinate. @@ -5884,20 +5807,14 @@ { "DESCRIPTOR": _NORMALIZEDBOUNDINGPOLY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Normalized bounding polygon for text (that might not be - aligned with axis). Contains list of the corner points in clockwise - order starting from top-left corner. 
For example, for a rectangular - bounding box: When the text is horizontal it might look like: 0—-1 \| \| - 3—-2 - - When it’s clockwise rotated 180 degrees around the top-left corner it - becomes: 2—-3 \| \| 1—-0 - - and the vertex order will still be (0, 1, 2, 3). Note that values can be - less than 0, or greater than 1 due to trignometric calculations for - location of the box. - - + "__doc__": """Normalized bounding polygon for text (that might not be aligned with + axis). Contains list of the corner points in clockwise order starting + from top-left corner. For example, for a rectangular bounding box: + When the text is horizontal it might look like: 0—-1 \| \| 3—-2 When + it’s clockwise rotated 180 degrees around the top-left corner it + becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, + 3). Note that values can be less than 0, or greater than 1 due to + trignometric calculations for location of the box. Attributes: vertices: Normalized vertices of the bounding polygon. @@ -5914,8 +5831,6 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. - - Attributes: segment: Video segment where a text snippet was detected. @@ -5937,11 +5852,9 @@ { "DESCRIPTOR": _TEXTFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotation results for text annotation - (OCR). Contains information regarding timestamp and bounding box - locations for the frames containing detected OCR text snippets. - - + "__doc__": """Video frame level annotation results for text annotation (OCR). + Contains information regarding timestamp and bounding box locations + for the frames containing detected OCR text snippets. Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. 
@@ -5959,11 +5872,9 @@ { "DESCRIPTOR": _TEXTANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Annotations related to one detected OCR text snippet. This - will contain the corresponding text, confidence value, and frame level + "__doc__": """Annotations related to one detected OCR text snippet. This will + contain the corresponding text, confidence value, and frame level information for each detection. - - Attributes: text: The detected text. @@ -5981,11 +5892,8 @@ { "DESCRIPTOR": _OBJECTTRACKINGFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Video frame level annotations for object detection and - tracking. This field stores per frame location, time offset, and - confidence. - - + "__doc__": """Video frame level annotations for object detection and tracking. This + field stores per frame location, time offset, and confidence. Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -6005,8 +5913,6 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. - - Attributes: track_info: Different representation of tracking info in non-streaming @@ -6044,10 +5950,8 @@ { "DESCRIPTOR": _LOGORECOGNITIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Annotation corresponding to one detected, tracked and - recognized logo class. - - + "__doc__": """Annotation corresponding to one detected, tracked and recognized logo + class. Attributes: entity: Entity category information to specify the logo class that all @@ -6078,8 +5982,6 @@ ``StreamingAnnotateVideoRequest`` messages are sent. The first message must only contain a ``StreamingVideoConfig`` message. 
All subsequent messages must only contain ``input_content`` data. - - Attributes: streaming_request: \ *Required* The streaming request, which is either a @@ -6109,10 +6011,8 @@ { "DESCRIPTOR": _STREAMINGVIDEOCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Provides information to the annotator that specifies how to process the - request. - - + "__doc__": """Provides information to the annotator that specifies how to process + the request. Attributes: streaming_config: Config for requested annotation feature. @@ -6146,12 +6046,10 @@ { "DESCRIPTOR": _STREAMINGANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """\ ``StreamingAnnotateVideoResponse`` is the only message - returned to the client by ``StreamingAnnotateVideo``. A series of zero - or more ``StreamingAnnotateVideoResponse`` messages are streamed back to - the client. - - + "__doc__": """\ ``StreamingAnnotateVideoResponse`` is the only message returned to + the client by ``StreamingAnnotateVideo``. A series of zero or more + ``StreamingAnnotateVideoResponse`` messages are streamed back to the + client. Attributes: error: If set, returns a [google.rpc.Status][google.rpc.Status] @@ -6175,10 +6073,8 @@ { "DESCRIPTOR": _STREAMINGVIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Streaming annotation results corresponding to a portion of - the video that is currently being processed. - - + "__doc__": """Streaming annotation results corresponding to a portion of the video + that is currently being processed. Attributes: shot_annotations: Shot annotation results. 
Each shot is represented as a video @@ -6201,9 +6097,7 @@ { "DESCRIPTOR": _STREAMINGSHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_SHOT_CHANGE_DETECTION. - - """, + "__doc__": """Config for STREAMING_SHOT_CHANGE_DETECTION.""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig) }, ) @@ -6216,8 +6110,6 @@ "DESCRIPTOR": _STREAMINGLABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_LABEL_DETECTION. - - Attributes: stationary_camera: Whether the video has been captured from a stationary @@ -6235,9 +6127,7 @@ { "DESCRIPTOR": _STREAMINGEXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_EXPLICIT_CONTENT_DETECTION. - - """, + "__doc__": """Config for STREAMING_EXPLICIT_CONTENT_DETECTION.""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig) }, ) @@ -6249,9 +6139,7 @@ { "DESCRIPTOR": _STREAMINGOBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """Config for STREAMING_OBJECT_TRACKING. - - """, + "__doc__": """Config for STREAMING_OBJECT_TRACKING.""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig) }, ) @@ -6264,8 +6152,6 @@ "DESCRIPTOR": _STREAMINGAUTOMLACTIONRECOGNITIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_ACTION_RECOGNITION. - - Attributes: model_name: Resource name of AutoML model. 
Format: ``projects/{project_id} @@ -6283,8 +6169,6 @@ "DESCRIPTOR": _STREAMINGAUTOMLCLASSIFICATIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION. - - Attributes: model_name: Resource name of AutoML model. Format: ``projects/{project_num @@ -6302,8 +6186,6 @@ "DESCRIPTOR": _STREAMINGAUTOMLOBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING. - - Attributes: model_name: Resource name of AutoML model. Format: ``projects/{project_id} @@ -6321,8 +6203,6 @@ "DESCRIPTOR": _STREAMINGSTORAGECONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for streaming storage option. - - Attributes: enable_storage_annotation_result: Enable streaming storage. Default: false. diff --git a/synth.metadata b/synth.metadata index f370f40e..3b5c1c84 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "cd3ce2651c3921670217e664303976cdf76e9fe2", - "internalRef": "312123588" + "sha": "dec3204175104cef49bf21d685d5517caaf0058f", + "internalRef": "312689208" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d2364eb80b840a36136c8ce12f1c6efabcc9600e" + "sha": "7ee92820e64c0aea379781b82399d6b3f3c8655f" } } ], From 167d88a999d64e3be91dec58a12f4a68fc5f2180 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:18:18 -0700 Subject: [PATCH 11/17] bazel: update protobuf, rules_go, gazelle, and gapic-generator-go versions - protobuf v3.12.1 - rules_go v0.23.0 - gazelle v0.21.0 - gapic-generator-go v0.14.1 PiperOrigin-RevId: 313460921 Source-Author: Google APIs Source-Date: Wed May 27 14:10:16 2020 -0700 Source-Repo: googleapis/googleapis 
Source-Sha: c4e37010d74071851ff24121f522e802231ac86e Source-Link: https://github.com/googleapis/googleapis/commit/c4e37010d74071851ff24121f522e802231ac86e --- .../proto/video_intelligence_pb2.py | 249 +++++++++++++- .../proto/video_intelligence_pb2.py | 148 +++++++- .../proto/video_intelligence_pb2.py | 144 +++++++- .../proto/video_intelligence_pb2.py | 163 ++++++++- .../proto/video_intelligence_pb2.py | 316 +++++++++++++++++- synth.metadata | 6 +- 6 files changed, 982 insertions(+), 44 deletions(-) diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index f2b32782..954049a6 100644 --- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -29,6 +29,7 @@ package="google.cloud.videointelligence.v1", syntax="proto3", serialized_options=b"\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 
\x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xe6\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x98\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa3\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 
\x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"\xa1\t\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12K\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 
\x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xdf\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"d\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -46,6 +47,7 @@ full_name="google.cloud.videointelligence.v1.Feature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="FEATURE_UNSPECIFIED", @@ -53,6 +55,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LABEL_DETECTION", @@ -60,6 +63,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_CHANGE_DETECTION", @@ -67,6 +71,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="EXPLICIT_CONTENT_DETECTION", @@ -74,9 +79,15 @@ number=3, serialized_options=None, type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None + name="FACE_DETECTION", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SPEECH_TRANSCRIPTION", @@ -84,9 +95,15 @@ number=6, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None + name="TEXT_DETECTION", + index=6, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="OBJECT_TRACKING", @@ -94,6 +111,7 @@ number=9, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LOGO_RECOGNITION", @@ -101,6 +119,7 @@ number=12, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -116,6 +135,7 @@ full_name="google.cloud.videointelligence.v1.LabelDetectionMode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LABEL_DETECTION_MODE_UNSPECIFIED", @@ -123,12 +143,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHOT_MODE", index=1, number=1, serialized_options=None, type=None + name="SHOT_MODE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FRAME_MODE", index=2, number=2, serialized_options=None, type=None + name="FRAME_MODE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( 
name="SHOT_AND_FRAME_MODE", @@ -136,6 +167,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -151,6 +183,7 @@ full_name="google.cloud.videointelligence.v1.Likelihood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LIKELIHOOD_UNSPECIFIED", @@ -158,21 +191,47 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_UNLIKELY", index=1, number=1, serialized_options=None, type=None + name="VERY_UNLIKELY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNLIKELY", index=2, number=2, serialized_options=None, type=None + name="UNLIKELY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSSIBLE", index=3, number=3, serialized_options=None, type=None + name="POSSIBLE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIKELY", index=4, number=4, serialized_options=None, type=None + name="LIKELY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_LIKELY", index=5, number=5, serialized_options=None, type=None + name="VERY_LIKELY", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -210,6 +269,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -228,6 +288,7 @@ extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -246,6 +307,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -264,6 +326,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_context", @@ -282,6 +345,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_uri", @@ -300,6 +364,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location_id", @@ -318,6 +383,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -339,6 +405,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segments", @@ -357,6 +424,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_detection_config", @@ -375,6 +443,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -393,6 +462,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -411,6 +481,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_detection_config", @@ 
-429,6 +500,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_transcription_config", @@ -447,6 +519,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_detection_config", @@ -465,6 +538,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_tracking_config", @@ -483,6 +557,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -504,6 +579,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="label_detection_mode", @@ -522,6 +598,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stationary_camera", @@ -540,6 +617,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -558,6 +636,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_confidence_threshold", @@ -576,6 +655,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_confidence_threshold", @@ -594,6 +674,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -615,6 +696,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="model", @@ -633,6 +715,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -654,6 +737,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -672,6 +756,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -693,6 +778,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -711,6 +797,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="include_bounding_boxes", @@ -729,6 +816,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -750,6 +838,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -768,6 +857,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -789,6 +879,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_hints", @@ -807,6 +898,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -825,6 +917,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -846,6 +939,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="start_time_offset", @@ -864,6 +958,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -882,6 +977,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -903,6 +999,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -921,6 +1018,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -939,6 +1037,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -960,6 +1059,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -978,6 +1078,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -996,6 +1097,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1017,6 +1119,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity_id", @@ -1035,6 +1138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -1053,6 +1157,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -1071,6 +1176,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1092,6 +1198,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -1110,6 +1217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_entities", @@ -1128,6 +1236,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -1146,6 +1255,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1164,6 +1274,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1185,6 +1296,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -1203,6 +1315,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pornography_likelihood", @@ -1221,6 +1334,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1242,6 +1356,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="frames", @@ -1260,6 +1375,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1281,6 +1397,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, 
fields=[ _descriptor.FieldDescriptor( name="left", @@ -1299,6 +1416,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="top", @@ -1317,6 +1435,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="right", @@ -1335,6 +1454,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bottom", @@ -1353,6 +1473,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1374,6 +1495,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1392,6 +1514,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1413,6 +1536,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_boxes", @@ -1431,6 +1555,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1449,6 +1574,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1470,6 +1596,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="thumbnail", @@ -1488,6 +1615,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -1506,6 +1634,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1524,6 +1653,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1545,6 +1675,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_box", @@ -1563,6 +1694,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1581,6 +1713,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="attributes", @@ -1599,6 +1732,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="landmarks", @@ -1617,6 +1751,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1638,6 +1773,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1656,6 +1792,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamped_objects", @@ -1674,6 +1811,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="attributes", @@ -1692,6 +1830,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1710,6 +1849,7 @@ 
extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1731,6 +1871,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1749,6 +1890,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1767,6 +1909,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -1785,6 +1928,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1806,6 +1950,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1824,6 +1969,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="point", @@ -1842,6 +1988,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1860,6 +2007,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1881,6 +2029,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1899,6 +2048,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment", @@ -1917,6 +2067,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_label_annotations", @@ -1935,6 +2086,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_presence_label_annotations", @@ -1953,6 +2105,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_label_annotations", @@ -1971,6 +2124,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_presence_label_annotations", @@ -1989,6 +2143,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_label_annotations", @@ -2007,6 +2162,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_annotations", @@ -2025,6 +2181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_annotations", @@ -2043,6 +2200,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -2061,6 +2219,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_transcriptions", @@ -2079,6 +2238,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_annotations", @@ -2097,6 +2257,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_annotations", @@ -2115,6 +2276,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="logo_recognition_annotations", @@ -2133,6 +2295,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="error", @@ -2151,6 +2314,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2172,6 +2336,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_results", @@ -2190,6 +2355,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2211,6 +2377,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -2229,6 +2396,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -2247,6 +2415,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_time", @@ -2265,6 +2434,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -2283,6 +2453,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="feature", @@ -2301,6 +2472,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment", @@ -2319,6 +2491,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2340,6 +2513,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_progress", @@ -2358,6 +2532,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2379,6 +2554,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_code", @@ -2397,6 +2573,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_alternatives", @@ -2415,6 +2592,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter_profanity", @@ -2433,6 +2611,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_contexts", @@ -2451,6 +2630,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_automatic_punctuation", @@ -2469,6 +2649,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="audio_tracks", @@ -2487,6 +2668,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_speaker_diarization", @@ -2505,6 +2687,7 @@ 
extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="diarization_speaker_count", @@ -2523,6 +2706,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_word_confidence", @@ -2541,6 +2725,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2562,6 +2747,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="phrases", @@ -2580,6 +2766,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2601,6 +2788,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="alternatives", @@ -2619,6 +2807,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -2637,6 +2826,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2658,6 +2848,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="transcript", @@ -2676,6 +2867,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -2694,6 +2886,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="words", @@ -2712,6 +2905,7 @@ 
extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2733,6 +2927,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time", @@ -2751,6 +2946,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time", @@ -2769,6 +2965,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="word", @@ -2787,6 +2984,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -2805,6 +3003,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speaker_tag", @@ -2823,6 +3022,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2844,6 +3044,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="x", @@ -2862,6 +3063,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="y", @@ -2880,6 +3082,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2901,6 +3104,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="vertices", @@ -2919,6 +3123,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2940,6 +3145,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -2958,6 +3164,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -2976,6 +3183,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -2994,6 +3202,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3015,6 +3224,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="rotated_bounding_box", @@ -3033,6 +3243,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -3051,6 +3262,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3072,6 +3284,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -3090,6 +3303,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -3108,6 +3322,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3129,6 +3344,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_box", @@ 
-3147,6 +3363,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -3165,6 +3382,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3186,6 +3404,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -3204,6 +3423,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="track_id", @@ -3222,6 +3442,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="entity", @@ -3240,6 +3461,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -3258,6 +3480,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -3276,6 +3499,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3291,6 +3515,7 @@ full_name="google.cloud.videointelligence.v1.ObjectTrackingAnnotation.track_info", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -3305,6 +3530,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -3323,6 +3549,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tracks", @@ -3341,6 +3568,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -3359,6 +3587,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -4671,6 +4900,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=8239, serialized_end=8559, methods=[ @@ -4682,6 +4912,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002\030"\023/v1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', + create_key=_descriptor._internal_create_key, ) ], ) diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index 8e4b6615..9c0ace0a 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -29,6 +29,7 @@ package="google.cloud.videointelligence.v1beta2", syntax="proto3", serialized_options=b"\n*com.google.cloud.videointelligence.v1beta2B\035VideoIntelligenceServiceProtoP\001ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\252\002&Google.Cloud.VideoIntelligence.V1Beta2\312\002&Google\\Cloud\\VideoIntelligence\\V1beta2\352\002)Google::Cloud::VideoIntelligence::V1beta2", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\nEgoogle/cloud/videointelligence_v1beta2/proto/video_intelligence.proto\x12&google.cloud.videointelligence.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x88\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x46\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32/.google.cloud.videointelligence.v1beta2.FeatureB\x03\xe0\x41\x02\x12K\n\rvideo_context\x18\x03 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xec\x03\n\x0cVideoContext\x12\x46\n\x08segments\x18\x01 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\\\n\x16label_detection_config\x18\x02 \x01(\x0b\x32<.google.cloud.videointelligence.v1beta2.LabelDetectionConfig\x12g\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig\x12q\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x46.google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig\x12Z\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32;.google.cloud.videointelligence.v1beta2.FaceDetectionConfig"\x9a\x01\n\x14LabelDetectionConfig\x12X\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1beta2.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 
\x01(\x08"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"i\n\x0cLabelSegment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xa8\x02\n\x0fLabelAnnotation\x12>\n\x06\x65ntity\x18\x01 \x01(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12I\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12\x46\n\x08segments\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.LabelSegment\x12\x42\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1beta2.LabelFrame"\x9a\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12R\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x32.google.cloud.videointelligence.v1beta2.Likelihood"i\n\x19\x45xplicitContentAnnotation\x12L\n\x06\x66rames\x18\x01 \x03(\x0b\x32<.google.cloud.videointelligence.v1beta2.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"T\n\x0b\x46\x61\x63\x65Segment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment"\x9d\x01\n\tFaceFrame\x12`\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32=.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 
\x01(\x0c\x12\x45\n\x08segments\x18\x02 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1beta2.FaceSegment\x12\x41\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1beta2.FaceFrame"\xdf\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12Z\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12W\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12X\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12P\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1beta2.FaceAnnotation\x12N\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12^\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"s\n\x15\x41nnotateVideoResponse\x12Z\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1beta2.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"u\n\x15\x41nnotateVideoProgress\x12\\\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1beta2.VideoAnnotationProgress*\x86\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xca\x02\n\x18VideoIntelligenceService\x12\xd7\x01\n\rAnnotateVideo\x12<.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"i\x82\xd3\xe4\x93\x02\x1d"\x18/v1beta2/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xa4\x02\n*com.google.cloud.videointelligence.v1beta2B\x1dVideoIntelligenceServiceProtoP\x01ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\xaa\x02&Google.Cloud.VideoIntelligence.V1Beta2\xca\x02&Google\\Cloud\\VideoIntelligence\\V1beta2\xea\x02)Google::Cloud::VideoIntelligence::V1beta2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -46,6 +47,7 @@ full_name="google.cloud.videointelligence.v1beta2.Feature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="FEATURE_UNSPECIFIED", @@ -53,6 +55,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LABEL_DETECTION", @@ -60,6 +63,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_CHANGE_DETECTION", @@ -67,6 +71,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="EXPLICIT_CONTENT_DETECTION", @@ -74,9 +79,15 @@ number=3, 
serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None + name="FACE_DETECTION", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -92,6 +103,7 @@ full_name="google.cloud.videointelligence.v1beta2.LabelDetectionMode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LABEL_DETECTION_MODE_UNSPECIFIED", @@ -99,12 +111,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHOT_MODE", index=1, number=1, serialized_options=None, type=None + name="SHOT_MODE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FRAME_MODE", index=2, number=2, serialized_options=None, type=None + name="FRAME_MODE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_AND_FRAME_MODE", @@ -112,6 +135,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -127,6 +151,7 @@ full_name="google.cloud.videointelligence.v1beta2.Likelihood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LIKELIHOOD_UNSPECIFIED", @@ -134,21 +159,47 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_UNLIKELY", index=1, number=1, serialized_options=None, type=None + name="VERY_UNLIKELY", + index=1, + number=1, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNLIKELY", index=2, number=2, serialized_options=None, type=None + name="UNLIKELY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSSIBLE", index=3, number=3, serialized_options=None, type=None + name="POSSIBLE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIKELY", index=4, number=4, serialized_options=None, type=None + name="LIKELY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_LIKELY", index=5, number=5, serialized_options=None, type=None + name="VERY_LIKELY", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -182,6 +233,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -200,6 +252,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -218,6 +271,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -236,6 +290,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_context", @@ -254,6 +309,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_uri", @@ -272,6 +328,7 @@ extension_scope=None, 
serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location_id", @@ -290,6 +347,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -311,6 +369,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segments", @@ -329,6 +388,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_detection_config", @@ -347,6 +407,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -365,6 +426,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -383,6 +445,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_detection_config", @@ -401,6 +464,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -422,6 +486,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="label_detection_mode", @@ -440,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stationary_camera", @@ -458,6 +524,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -476,6 +543,7 
@@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -497,6 +565,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -515,6 +584,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -536,6 +606,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -554,6 +625,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -575,6 +647,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -593,6 +666,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="include_bounding_boxes", @@ -611,6 +685,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -632,6 +707,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time_offset", @@ -650,6 +726,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -668,6 +745,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -689,6 +767,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="segment", @@ -707,6 +786,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -725,6 +805,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -746,6 +827,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -764,6 +846,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -782,6 +865,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -803,6 +887,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity_id", @@ -821,6 +906,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -839,6 +925,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -857,6 +944,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -878,6 +966,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -896,6 +985,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_entities", @@ -914,6 +1004,7 @@ extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -932,6 +1023,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -950,6 +1042,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -971,6 +1064,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -989,6 +1083,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pornography_likelihood", @@ -1007,6 +1102,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1028,6 +1124,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="frames", @@ -1046,6 +1143,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1067,6 +1165,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="left", @@ -1085,6 +1184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="top", @@ -1103,6 +1203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="right", @@ -1121,6 +1222,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="bottom", @@ -1139,6 +1241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1160,6 +1263,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1178,6 +1282,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1199,6 +1304,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_boxes", @@ -1217,6 +1323,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1235,6 +1342,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1256,6 +1364,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="thumbnail", @@ -1274,6 +1383,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -1292,6 +1402,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1310,6 +1421,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1331,6 +1443,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1349,6 +1462,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_label_annotations", @@ -1367,6 +1481,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_label_annotations", @@ -1385,6 +1500,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_label_annotations", @@ -1403,6 +1519,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_annotations", @@ -1421,6 +1538,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_annotations", @@ -1439,6 +1557,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -1457,6 +1576,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="error", @@ -1475,6 +1595,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1496,6 +1617,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_results", @@ -1514,6 +1636,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1535,6 +1658,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1553,6 +1677,7 @@ extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -1571,6 +1696,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_time", @@ -1589,6 +1715,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -1607,6 +1734,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1628,6 +1756,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_progress", @@ -1646,6 +1775,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2256,6 +2386,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=4250, serialized_end=4580, methods=[ @@ -2267,6 +2398,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002\035"\030/v1beta2/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', + create_key=_descriptor._internal_create_key, ) ], ) diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py index c1cfdf44..4bbdc2cb 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py @@ -29,6 +29,7 @@ 
package="google.cloud.videointelligence.v1p1beta1", syntax="proto3", serialized_options=b"\n,com.google.cloud.videointelligence.v1p1beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P1Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p1beta1\352\002+Google::Cloud::VideoIntelligence::V1p1beta1", + create_key=_descriptor._internal_create_key, serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p1beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x82\x04\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 
\x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p1beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p1beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p1beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame"\xf5\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 
\x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p1beta1.SpeechTranscription\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress"\x92\x02\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p1beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"s\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 
\x03(\x0b\x32\x46.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative"\x8e\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x41\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p1beta1.WordInfo"t\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t*\x8c\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p1beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p1beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P1Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p1beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p1beta1b\x06proto3', dependencies=[ 
google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -46,6 +47,7 @@ full_name="google.cloud.videointelligence.v1p1beta1.Feature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="FEATURE_UNSPECIFIED", @@ -53,6 +55,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LABEL_DETECTION", @@ -60,6 +63,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_CHANGE_DETECTION", @@ -67,6 +71,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="EXPLICIT_CONTENT_DETECTION", @@ -74,6 +79,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SPEECH_TRANSCRIPTION", @@ -81,6 +87,7 @@ number=6, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -96,6 +103,7 @@ full_name="google.cloud.videointelligence.v1p1beta1.LabelDetectionMode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LABEL_DETECTION_MODE_UNSPECIFIED", @@ -103,12 +111,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHOT_MODE", index=1, number=1, serialized_options=None, type=None + name="SHOT_MODE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FRAME_MODE", index=2, number=2, serialized_options=None, type=None + name="FRAME_MODE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( name="SHOT_AND_FRAME_MODE", @@ -116,6 +135,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -131,6 +151,7 @@ full_name="google.cloud.videointelligence.v1p1beta1.Likelihood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LIKELIHOOD_UNSPECIFIED", @@ -138,21 +159,47 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_UNLIKELY", index=1, number=1, serialized_options=None, type=None + name="VERY_UNLIKELY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNLIKELY", index=2, number=2, serialized_options=None, type=None + name="UNLIKELY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSSIBLE", index=3, number=3, serialized_options=None, type=None + name="POSSIBLE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIKELY", index=4, number=4, serialized_options=None, type=None + name="LIKELY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_LIKELY", index=5, number=5, serialized_options=None, type=None + name="VERY_LIKELY", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -186,6 +233,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -204,6 +252,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -222,6 +271,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -240,6 +290,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_context", @@ -258,6 +309,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_uri", @@ -276,6 +328,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location_id", @@ -294,6 +347,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -315,6 +369,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segments", @@ -333,6 +388,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_detection_config", @@ -351,6 +407,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -369,6 +426,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -387,6 +445,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="speech_transcription_config", @@ -405,6 +464,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -426,6 +486,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="label_detection_mode", @@ -444,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stationary_camera", @@ -462,6 +524,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -480,6 +543,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -501,6 +565,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -519,6 +584,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -540,6 +606,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -558,6 +625,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -579,6 +647,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time_offset", @@ -597,6 +666,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -615,6 +685,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -636,6 +707,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -654,6 +726,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -672,6 +745,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -693,6 +767,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -711,6 +786,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -729,6 +805,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -750,6 +827,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity_id", @@ -768,6 +846,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -786,6 +865,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -804,6 +884,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -825,6 +906,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="entity", @@ -843,6 +925,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_entities", @@ -861,6 +944,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -879,6 +963,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -897,6 +982,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -918,6 +1004,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -936,6 +1023,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pornography_likelihood", @@ -954,6 +1042,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -975,6 +1064,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="frames", @@ -993,6 +1083,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1014,6 +1105,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1032,6 +1124,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_label_annotations", @@ -1050,6 +1143,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_label_annotations", @@ -1068,6 +1162,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_label_annotations", @@ -1086,6 +1181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_annotations", @@ -1104,6 +1200,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -1122,6 +1219,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_transcriptions", @@ -1140,6 +1238,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="error", @@ -1158,6 +1257,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1179,6 +1279,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_results", @@ -1197,6 +1298,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1218,6 +1320,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1236,6 +1339,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -1254,6 +1358,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_time", @@ -1272,6 +1377,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -1290,6 +1396,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1311,6 +1418,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_progress", @@ -1329,6 +1437,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1350,6 +1459,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_code", @@ -1368,6 +1478,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_alternatives", @@ -1386,6 +1497,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter_profanity", @@ -1404,6 +1516,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_contexts", @@ -1422,6 +1535,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_automatic_punctuation", @@ -1440,6 +1554,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="audio_tracks", @@ -1458,6 +1573,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1479,6 +1595,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="phrases", @@ -1497,6 +1614,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1518,6 +1636,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="alternatives", @@ -1536,6 +1655,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1557,6 +1677,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="transcript", @@ -1575,6 +1696,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1593,6 +1715,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="words", @@ -1611,6 +1734,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1632,6 +1756,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time", @@ -1650,6 +1775,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time", @@ -1668,6 +1794,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="word", @@ -1686,6 +1813,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2366,6 +2494,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=4449, serialized_end=4783, methods=[ @@ -2377,6 +2506,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002\037"\032/v1p1beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', + create_key=_descriptor._internal_create_key, ) ], ) diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index 66291643..40b8d60b 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -29,6 +29,7 @@ package="google.cloud.videointelligence.v1p2beta1", syntax="proto3", serialized_options=b"\n,com.google.cloud.videointelligence.v1p2beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P2Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p2beta1\352\002+Google::Cloud::VideoIntelligence::V1p2beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p2beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p2beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xf6\x03\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.TextDetectionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p2beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"-\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 
\x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p2beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p2beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcb\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p2beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p2beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p2beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p2beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 
\x01(\x0b\x32?.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x88\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame\x12G\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment*\x9b\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p2beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p2beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P2Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p2beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p2beta1b\x06proto3', dependencies=[ 
google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -46,6 +47,7 @@ full_name="google.cloud.videointelligence.v1p2beta1.Feature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="FEATURE_UNSPECIFIED", @@ -53,6 +55,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LABEL_DETECTION", @@ -60,6 +63,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_CHANGE_DETECTION", @@ -67,6 +71,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="EXPLICIT_CONTENT_DETECTION", @@ -74,9 +79,15 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=4, number=7, serialized_options=None, type=None + name="TEXT_DETECTION", + index=4, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="OBJECT_TRACKING", @@ -84,6 +95,7 @@ number=9, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -99,6 +111,7 @@ full_name="google.cloud.videointelligence.v1p2beta1.LabelDetectionMode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LABEL_DETECTION_MODE_UNSPECIFIED", @@ -106,12 +119,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHOT_MODE", index=1, number=1, serialized_options=None, type=None + name="SHOT_MODE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( - name="FRAME_MODE", index=2, number=2, serialized_options=None, type=None + name="FRAME_MODE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_AND_FRAME_MODE", @@ -119,6 +143,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -134,6 +159,7 @@ full_name="google.cloud.videointelligence.v1p2beta1.Likelihood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LIKELIHOOD_UNSPECIFIED", @@ -141,21 +167,47 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_UNLIKELY", index=1, number=1, serialized_options=None, type=None + name="VERY_UNLIKELY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNLIKELY", index=2, number=2, serialized_options=None, type=None + name="UNLIKELY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSSIBLE", index=3, number=3, serialized_options=None, type=None + name="POSSIBLE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIKELY", index=4, number=4, serialized_options=None, type=None + name="LIKELY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_LIKELY", index=5, number=5, serialized_options=None, type=None + name="VERY_LIKELY", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, 
), ], containing_type=None, @@ -190,6 +242,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -208,6 +261,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -226,6 +280,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -244,6 +299,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_context", @@ -262,6 +318,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_uri", @@ -280,6 +337,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location_id", @@ -298,6 +356,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -319,6 +378,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segments", @@ -337,6 +397,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_detection_config", @@ -355,6 +416,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -373,6 +435,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -391,6 +454,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_detection_config", @@ -409,6 +473,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -430,6 +495,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="label_detection_mode", @@ -448,6 +514,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stationary_camera", @@ -466,6 +533,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -484,6 +552,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -505,6 +574,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -523,6 +593,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -544,6 +615,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -562,6 +634,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -583,6 +656,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_hints", @@ -601,6 +675,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -622,6 +697,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time_offset", @@ -640,6 +716,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -658,6 +735,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -679,6 +757,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -697,6 +776,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -715,6 +795,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -736,6 +817,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -754,6 +836,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -772,6 +855,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -793,6 +877,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity_id", @@ -811,6 +896,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="description", @@ -829,6 +915,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -847,6 +934,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -868,6 +956,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -886,6 +975,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_entities", @@ -904,6 +994,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -922,6 +1013,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -940,6 +1032,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -961,6 +1054,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -979,6 +1073,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pornography_likelihood", @@ -997,6 +1092,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1018,6 +1114,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="frames", @@ -1036,6 +1133,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1057,6 +1155,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="left", @@ -1075,6 +1174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="top", @@ -1093,6 +1193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="right", @@ -1111,6 +1212,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bottom", @@ -1129,6 +1231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1150,6 +1253,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1168,6 +1272,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_label_annotations", @@ -1186,6 +1291,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_label_annotations", @@ -1204,6 +1310,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_label_annotations", @@ -1222,6 +1329,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_annotations", @@ -1240,6 +1348,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -1258,6 +1367,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_annotations", @@ -1276,6 +1386,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_annotations", @@ -1294,6 +1405,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="error", @@ -1312,6 +1424,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1333,6 +1446,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_results", @@ -1351,6 +1465,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1372,6 +1487,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -1390,6 +1506,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -1408,6 +1525,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_time", @@ -1426,6 +1544,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -1444,6 +1563,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1465,6 +1585,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_progress", @@ -1483,6 +1604,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1504,6 +1626,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="x", @@ -1522,6 +1645,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="y", @@ -1540,6 +1664,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1561,6 +1686,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="vertices", @@ -1579,6 +1705,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1600,6 +1727,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1618,6 +1746,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1636,6 +1765,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1654,6 +1784,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1675,6 +1806,7 @@ 
filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="rotated_bounding_box", @@ -1693,6 +1825,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1711,6 +1844,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1732,6 +1866,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -1750,6 +1885,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -1768,6 +1904,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1789,6 +1926,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_box", @@ -1807,6 +1945,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1825,6 +1964,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1846,6 +1986,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -1864,6 +2005,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1882,6 +2024,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1900,6 +2043,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment", @@ -1918,6 +2062,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2635,6 +2780,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=4996, serialized_end=5330, methods=[ @@ -2646,6 +2792,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002\037"\032/v1p2beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', + create_key=_descriptor._internal_create_key, ) ], ) diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index 6ead460f..be79a2b4 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -29,6 +29,7 @@ package="google.cloud.videointelligence.v1p3beta1", syntax="proto3", serialized_options=b"\n,com.google.cloud.videointelligence.v1p3beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P3Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p3beta1", + create_key=_descriptor._internal_create_key, 
serialized_pb=b'\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r 
\x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 
\x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 
\x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 
\x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameB\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\x8a\x08\n\x14StreamingVideoConfig\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 
\x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12|\n automl_action_recognition_config\x18\x17 \x01(\x0b\x32P.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"$\n"StreamingShotChangeDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 
\x01(\x08")\n\'StreamingExplicitContentDetectionConfig"\x1f\n\x1dStreamingObjectTrackingConfig"<\n&StreamingAutomlActionRecognitionConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\xb6\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12\'\n#STREAMING_AUTOML_ACTION_RECOGNITION\x10\x17\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n 
STREAMING_AUTOML_OBJECT_TRACKING\x10\x16*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -46,6 +47,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.LabelDetectionMode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LABEL_DETECTION_MODE_UNSPECIFIED", @@ -53,12 +55,23 @@ number=0, 
serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SHOT_MODE", index=1, number=1, serialized_options=None, type=None + name="SHOT_MODE", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FRAME_MODE", index=2, number=2, serialized_options=None, type=None + name="FRAME_MODE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_AND_FRAME_MODE", @@ -66,6 +79,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -81,6 +95,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.Likelihood", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="LIKELIHOOD_UNSPECIFIED", @@ -88,21 +103,47 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_UNLIKELY", index=1, number=1, serialized_options=None, type=None + name="VERY_UNLIKELY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNLIKELY", index=2, number=2, serialized_options=None, type=None + name="UNLIKELY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POSSIBLE", index=3, number=3, serialized_options=None, type=None + name="POSSIBLE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="LIKELY", index=4, number=4, serialized_options=None, type=None + name="LIKELY", + index=4, + number=4, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="VERY_LIKELY", index=5, number=5, serialized_options=None, type=None + name="VERY_LIKELY", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -118,6 +159,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.StreamingFeature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STREAMING_FEATURE_UNSPECIFIED", @@ -125,6 +167,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_LABEL_DETECTION", @@ -132,6 +175,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_SHOT_CHANGE_DETECTION", @@ -139,6 +183,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_EXPLICIT_CONTENT_DETECTION", @@ -146,6 +191,7 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_OBJECT_TRACKING", @@ -153,6 +199,7 @@ number=4, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_AUTOML_ACTION_RECOGNITION", @@ -160,6 +207,7 @@ number=23, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_AUTOML_CLASSIFICATION", @@ -167,6 +215,7 @@ number=21, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="STREAMING_AUTOML_OBJECT_TRACKING", @@ -174,6 +223,7 @@ number=22, serialized_options=None, type=None, + 
create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -189,6 +239,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.Feature", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="FEATURE_UNSPECIFIED", @@ -196,6 +247,7 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LABEL_DETECTION", @@ -203,6 +255,7 @@ number=1, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SHOT_CHANGE_DETECTION", @@ -210,6 +263,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="EXPLICIT_CONTENT_DETECTION", @@ -217,9 +271,15 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None + name="FACE_DETECTION", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="SPEECH_TRANSCRIPTION", @@ -227,9 +287,15 @@ number=6, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None + name="TEXT_DETECTION", + index=6, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="OBJECT_TRACKING", @@ -237,6 +303,7 @@ number=9, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="LOGO_RECOGNITION", @@ -244,6 +311,7 @@ number=12, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), 
_descriptor.EnumValueDescriptor( name="CELEBRITY_RECOGNITION", @@ -251,6 +319,7 @@ number=13, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="PERSON_DETECTION", @@ -258,6 +327,7 @@ number=14, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -305,6 +375,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -323,6 +394,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -341,6 +413,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="features", @@ -359,6 +432,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_context", @@ -377,6 +451,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_uri", @@ -395,6 +470,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location_id", @@ -413,6 +489,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -434,6 +511,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segments", @@ -452,6 +530,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="label_detection_config", @@ -470,6 +549,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -488,6 +568,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -506,6 +587,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_detection_config", @@ -524,6 +606,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_transcription_config", @@ -542,6 +625,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_detection_config", @@ -560,6 +644,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="person_detection_config", @@ -578,6 +663,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_tracking_config", @@ -596,6 +682,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -617,6 +704,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="label_detection_mode", @@ -635,6 +723,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stationary_camera", @@ -653,6 +742,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -671,6 +761,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_confidence_threshold", @@ -689,6 +780,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_confidence_threshold", @@ -707,6 +799,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -728,6 +821,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -746,6 +840,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -767,6 +862,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -785,6 +881,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -806,6 +903,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -824,6 +922,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -845,6 +944,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -863,6 +963,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="include_bounding_boxes", @@ -881,6 +982,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="include_attributes", @@ -899,6 +1001,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -920,6 +1023,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="include_bounding_boxes", @@ -938,6 +1042,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="include_pose_landmarks", @@ -956,6 +1061,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="include_attributes", @@ -974,6 +1080,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -995,6 +1102,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_hints", @@ -1013,6 +1121,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -1031,6 +1140,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1052,6 +1162,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time_offset", @@ -1070,6 +1181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ 
-1088,6 +1200,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1109,6 +1222,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1127,6 +1241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1145,6 +1260,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1166,6 +1282,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -1184,6 +1301,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1202,6 +1320,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1223,6 +1342,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity_id", @@ -1241,6 +1361,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -1259,6 +1380,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -1277,6 +1399,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1298,6 +1421,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -1316,6 +1440,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_entities", @@ -1334,6 +1459,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -1352,6 +1478,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -1370,6 +1497,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1391,6 +1519,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="time_offset", @@ -1409,6 +1538,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pornography_likelihood", @@ -1427,6 +1557,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1448,6 +1579,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="frames", @@ -1466,6 +1598,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -1487,6 +1620,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="left", @@ -1505,6 +1639,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="top", @@ -1523,6 +1658,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="right", @@ -1541,6 +1677,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bottom", @@ -1559,6 +1696,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1580,6 +1718,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_box", @@ -1598,6 +1737,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -1616,6 +1756,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="attributes", @@ -1634,6 +1775,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="landmarks", @@ -1652,6 +1794,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1673,6 +1816,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -1691,6 +1835,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamped_objects", @@ -1709,6 +1854,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="attributes", @@ -1727,6 +1873,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1745,6 +1892,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1766,6 +1914,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1784,6 +1933,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1802,6 +1952,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -1820,6 +1971,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1841,6 +1993,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1859,6 +2012,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -1877,6 +2031,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -1895,6 +2050,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1916,6 +2072,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="celebrity", @@ -1934,6 +2091,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -1952,6 +2110,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1972,6 +2131,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="celebrities", @@ -1990,6 +2150,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_track", @@ -2008,6 +2169,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2029,6 +2191,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="celebrity_tracks", @@ -2047,6 +2210,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2068,6 +2232,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -2086,6 +2251,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="point", @@ -2104,6 +2270,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -2122,6 +2289,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2143,6 +2311,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="tracks", @@ -2161,6 +2330,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="thumbnail", @@ -2179,6 +2349,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2200,6 +2371,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="tracks", @@ -2218,6 +2390,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2239,6 +2412,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -2257,6 +2431,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment", @@ -2275,6 +2450,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_label_annotations", @@ -2293,6 +2469,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment_presence_label_annotations", @@ -2311,6 +2488,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_label_annotations", @@ -2329,6 +2507,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_presence_label_annotations", @@ -2347,6 +2526,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frame_label_annotations", @@ -2365,6 +2545,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="face_detection_annotations", @@ -2383,6 +2564,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shot_annotations", @@ -2401,6 +2583,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -2419,6 +2602,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_transcriptions", @@ -2437,6 +2621,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_annotations", @@ -2455,6 +2640,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_annotations", @@ -2473,6 +2659,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="logo_recognition_annotations", @@ -2491,6 +2678,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="person_detection_annotations", @@ -2509,6 +2697,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="celebrity_recognition_annotations", @@ -2527,6 +2716,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="error", @@ -2545,6 +2735,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2566,6 +2757,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_results", @@ -2584,6 +2776,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2605,6 +2798,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -2623,6 +2817,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -2641,6 +2836,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_time", @@ -2659,6 +2855,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -2677,6 +2874,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="feature", @@ -2695,6 +2893,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segment", @@ -2713,6 +2912,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2734,6 +2934,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="annotation_progress", @@ -2752,6 +2953,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2773,6 +2975,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="language_code", @@ -2791,6 +2994,7 @@ extension_scope=None, serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_alternatives", @@ -2809,6 +3013,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter_profanity", @@ -2827,6 +3032,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speech_contexts", @@ -2845,6 +3051,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_automatic_punctuation", @@ -2863,6 +3070,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="audio_tracks", @@ -2881,6 +3089,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="enable_speaker_diarization", @@ -2899,6 +3108,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="diarization_speaker_count", @@ -2917,6 +3127,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( 
name="enable_word_confidence", @@ -2935,6 +3146,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -2956,6 +3168,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="phrases", @@ -2974,6 +3187,7 @@ extension_scope=None, serialized_options=b"\340A\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -2995,6 +3209,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="alternatives", @@ -3013,6 +3228,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="language_code", @@ -3031,6 +3247,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3052,6 +3269,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="transcript", @@ -3070,6 +3288,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -3088,6 +3307,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="words", @@ -3106,6 +3326,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3127,6 +3348,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_time", @@ -3145,6 +3367,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time", @@ -3163,6 +3386,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="word", @@ -3181,6 +3405,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -3199,6 +3424,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="speaker_tag", @@ -3217,6 +3443,7 @@ extension_scope=None, serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3238,6 +3465,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="x", @@ -3256,6 +3484,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="y", @@ -3274,6 +3503,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3295,6 +3525,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="vertices", @@ -3313,6 +3544,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -3334,6 +3566,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -3352,6 +3585,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="confidence", @@ -3370,6 +3604,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -3388,6 +3623,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3409,6 +3645,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="rotated_bounding_box", @@ -3427,6 +3664,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -3445,6 +3683,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3466,6 +3705,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text", @@ -3484,6 +3724,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -3502,6 +3743,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3523,6 +3765,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_bounding_box", @@ -3541,6 +3784,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -3559,6 +3803,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3580,6 +3825,7 @@ filename=None, 
file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="segment", @@ -3598,6 +3844,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="track_id", @@ -3616,6 +3863,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="entity", @@ -3634,6 +3882,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence", @@ -3652,6 +3901,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="frames", @@ -3670,6 +3920,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3685,6 +3936,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.track_info", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -3699,6 +3951,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entity", @@ -3717,6 +3970,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tracks", @@ -3735,6 +3989,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="segments", @@ -3753,6 +4008,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3774,6 +4030,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="video_config", @@ -3792,6 +4049,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_content", @@ -3810,6 +4068,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -3825,6 +4084,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest.streaming_request", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -3839,6 +4099,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="shot_change_detection_config", @@ -3857,6 +4118,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_detection_config", @@ -3875,6 +4137,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_content_detection_config", @@ -3893,6 +4156,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_tracking_config", @@ -3911,6 +4175,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="automl_action_recognition_config", @@ -3929,6 +4194,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="automl_classification_config", @@ -3947,6 +4213,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="automl_object_tracking_config", @@ -3965,6 +4232,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="feature", @@ -3983,6 +4251,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="storage_config", @@ -4001,6 +4270,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -4016,6 +4286,7 @@ full_name="google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig.streaming_config", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ) ], @@ -4030,6 +4301,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="error", @@ -4048,6 +4320,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_results", @@ -4066,6 +4339,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_results_uri", @@ -4084,6 +4358,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -4105,6 +4380,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="shot_annotations", @@ -4123,6 +4399,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="label_annotations", @@ -4141,6 +4418,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="explicit_annotation", @@ -4159,6 +4437,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="object_annotations", @@ -4177,6 +4456,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -4198,6 +4478,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -4218,6 +4499,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="stationary_camera", @@ -4236,6 +4518,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -4257,6 +4540,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -4277,6 +4561,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -4297,6 +4582,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_name", @@ -4315,6 +4601,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -4336,6 +4623,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_name", @@ -4354,6 +4642,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], 
@@ -4375,6 +4664,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_name", @@ -4393,6 +4683,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ) ], extensions=[], @@ -4414,6 +4705,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="enable_storage_annotation_result", @@ -4432,6 +4724,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_result_storage_directory", @@ -4450,6 +4743,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -6259,6 +6553,7 @@ file=DESCRIPTOR, index=0, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=12068, serialized_end=12402, methods=[ @@ -6270,6 +6565,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=b'\202\323\344\223\002\037"\032/v1p3beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress', + create_key=_descriptor._internal_create_key, ) ], ) @@ -6284,6 +6580,7 @@ file=DESCRIPTOR, index=1, serialized_options=b"\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, serialized_start=12405, serialized_end=12706, methods=[ @@ -6295,6 +6592,7 @@ input_type=_STREAMINGANNOTATEVIDEOREQUEST, output_type=_STREAMINGANNOTATEVIDEORESPONSE, serialized_options=None, + create_key=_descriptor._internal_create_key, ) ], ) diff --git a/synth.metadata 
b/synth.metadata index 3b5c1c84..a9865881 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,15 +11,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "dec3204175104cef49bf21d685d5517caaf0058f", - "internalRef": "312689208" + "sha": "c4e37010d74071851ff24121f522e802231ac86e", + "internalRef": "313460921" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "7ee92820e64c0aea379781b82399d6b3f3c8655f" + "sha": "470789cee75ce93c41348ad6aa4c49363a80399b" } } ], From fe796fd282ee73cd9c4570cbe185c22b5fe7651b Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:18:19 -0700 Subject: [PATCH 12/17] Use correct resource type for DetachSubscriptionRequest PiperOrigin-RevId: 313488995 Source-Author: Google APIs Source-Date: Wed May 27 16:45:32 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: eafa840ceec23b44a5c21670288107c661252711 Source-Link: https://github.com/googleapis/googleapis/commit/eafa840ceec23b44a5c21670288107c661252711 --- .../proto/video_intelligence_pb2.py | 4 ++-- synth.metadata | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index be79a2b4..1eea9b54 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -6340,8 +6340,8 @@ { "DESCRIPTOR": _STREAMINGANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """\ ``StreamingAnnotateVideoResponse`` is the only message returned to - the client by ``StreamingAnnotateVideo``. A series of zero or more + "__doc__": """``StreamingAnnotateVideoResponse`` is the only message returned to the + client by ``StreamingAnnotateVideo``. 
A series of zero or more ``StreamingAnnotateVideoResponse`` messages are streamed back to the client. Attributes: diff --git a/synth.metadata b/synth.metadata index a9865881..7523486e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c4e37010d74071851ff24121f522e802231ac86e", - "internalRef": "313460921" + "sha": "eafa840ceec23b44a5c21670288107c661252711", + "internalRef": "313488995" } }, { From db9cadc170659338b25998cf64660c648771465c Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:19:31 -0700 Subject: [PATCH 13/17] docs: update python docs template (#576) * docs: update python docs template * sphinx change Source-Author: kolea2 <45548808+kolea2@users.noreply.github.com> Source-Date: Wed May 27 20:44:34 2020 -0400 Source-Repo: googleapis/synthtool Source-Sha: 71b8a272549c06b5768d00fa48d3ae990e871bec Source-Link: https://github.com/googleapis/synthtool/commit/71b8a272549c06b5768d00fa48d3ae990e871bec --- docs/conf.py | 5 +---- .../proto/video_intelligence_pb2.py | 4 ++-- noxfile.py | 2 +- synth.metadata | 2 +- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 0b3148f0..35e69432 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,6 +38,7 @@ "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", + "recommonmark", ] # autodoc/autosummary flags @@ -49,10 +50,6 @@ # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - # The suffix(es) of source filenames. 
# You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index 1eea9b54..be79a2b4 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -6340,8 +6340,8 @@ { "DESCRIPTOR": _STREAMINGANNOTATEVIDEORESPONSE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - "__doc__": """``StreamingAnnotateVideoResponse`` is the only message returned to the - client by ``StreamingAnnotateVideo``. A series of zero or more + "__doc__": """\ ``StreamingAnnotateVideoResponse`` is the only message returned to + the client by ``StreamingAnnotateVideo``. A series of zero or more ``StreamingAnnotateVideoResponse`` messages are streamed back to the client. Attributes: diff --git a/noxfile.py b/noxfile.py index 0c058608..67e8d7f4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -138,7 +138,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/synth.metadata b/synth.metadata index 7523486e..864a74d0 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,7 +19,7 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "470789cee75ce93c41348ad6aa4c49363a80399b" + "sha": "71b8a272549c06b5768d00fa48d3ae990e871bec" } } ], From 8f9f18777b28275463f5f2ebcbe00189805b9f14 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:20:20 -0700 Subject: [PATCH 14/17] feat: add templates for python samples projects (#506) These templates will be used for templates in 
python-docs-samples and in Python client libraries. The README generation code is a modified version of https://github.com/GoogleCloudPlatform/python-docs-samples/tree/master/scripts/readme-gen. Co-authored-by: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu May 28 14:39:58 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: ffe10407ee2f261c799fb0d01bf32a8abc67ed1e Source-Link: https://github.com/googleapis/synthtool/commit/ffe10407ee2f261c799fb0d01bf32a8abc67ed1e --- .kokoro/samples/lint/common.cfg | 34 ++++++ .kokoro/samples/lint/continuous.cfg | 6 + .kokoro/samples/lint/periodic.cfg | 6 + .kokoro/samples/lint/presubmit.cfg | 6 + .kokoro/samples/python3.6/common.cfg | 34 ++++++ .kokoro/samples/python3.6/continuous.cfg | 7 ++ .kokoro/samples/python3.6/periodic.cfg | 6 + .kokoro/samples/python3.6/presubmit.cfg | 6 + .kokoro/samples/python3.7/common.cfg | 34 ++++++ .kokoro/samples/python3.7/continuous.cfg | 6 + .kokoro/samples/python3.7/periodic.cfg | 6 + .kokoro/samples/python3.7/presubmit.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 34 ++++++ .kokoro/samples/python3.8/continuous.cfg | 6 + .kokoro/samples/python3.8/periodic.cfg | 6 + .kokoro/samples/python3.8/presubmit.cfg | 6 + .kokoro/test-samples.sh | 104 ++++++++++++++++++ scripts/decrypt-secrets.sh | 33 ++++++ scripts/readme-gen/readme_gen.py | 66 +++++++++++ scripts/readme-gen/templates/README.tmpl.rst | 87 +++++++++++++++ scripts/readme-gen/templates/auth.tmpl.rst | 9 ++ .../templates/auth_api_key.tmpl.rst | 14 +++ .../templates/install_deps.tmpl.rst | 29 +++++ .../templates/install_portaudio.tmpl.rst | 35 ++++++ synth.metadata | 2 +- testing/.gitignore | 3 + 26 files changed, 590 insertions(+), 1 deletion(-) create mode 100644 .kokoro/samples/lint/common.cfg create mode 100644 .kokoro/samples/lint/continuous.cfg create mode 100644 .kokoro/samples/lint/periodic.cfg create mode 100644 
.kokoro/samples/lint/presubmit.cfg create mode 100644 .kokoro/samples/python3.6/common.cfg create mode 100644 .kokoro/samples/python3.6/continuous.cfg create mode 100644 .kokoro/samples/python3.6/periodic.cfg create mode 100644 .kokoro/samples/python3.6/presubmit.cfg create mode 100644 .kokoro/samples/python3.7/common.cfg create mode 100644 .kokoro/samples/python3.7/continuous.cfg create mode 100644 .kokoro/samples/python3.7/periodic.cfg create mode 100644 .kokoro/samples/python3.7/presubmit.cfg create mode 100644 .kokoro/samples/python3.8/common.cfg create mode 100644 .kokoro/samples/python3.8/continuous.cfg create mode 100644 .kokoro/samples/python3.8/periodic.cfg create mode 100644 .kokoro/samples/python3.8/presubmit.cfg create mode 100755 .kokoro/test-samples.sh create mode 100755 scripts/decrypt-secrets.sh create mode 100644 scripts/readme-gen/readme_gen.py create mode 100644 scripts/readme-gen/templates/README.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth_api_key.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_deps.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_portaudio.tmpl.rst create mode 100644 testing/.gitignore diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg new file mode 100644 index 00000000..883558c5 --- /dev/null +++ b/.kokoro/samples/lint/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "lint" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-videointelligence/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-videointelligence/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/lint/continuous.cfg b/.kokoro/samples/lint/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/periodic.cfg b/.kokoro/samples/lint/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/lint/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/presubmit.cfg b/.kokoro/samples/lint/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg new file mode 100644 index 00000000..f2c35a39 --- /dev/null +++ b/.kokoro/samples/python3.6/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.6" +} 
+ +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-videointelligence/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-videointelligence/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.6/continuous.cfg new file mode 100644 index 00000000..7218af14 --- /dev/null +++ b/.kokoro/samples/python3.6/continuous.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.6/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.6/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg new file mode 100644 index 00000000..c5274327 --- /dev/null +++ b/.kokoro/samples/python3.7/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto 
+ +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.7" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-videointelligence/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-videointelligence/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git 
a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg new file mode 100644 index 00000000..6c613929 --- /dev/null +++ b/.kokoro/samples/python3.8/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.8" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-videointelligence/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-videointelligence/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh new file mode 100755 index 00000000..d7321425 --- /dev/null +++ b/.kokoro/test-samples.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost command to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-videointelligence + +# Run periodic samples tests at latest release +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + LATEST_RELEASE=$(git describe --abbrev=0 --tags) + git checkout $LATEST_RELEASE +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets accessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. 
+ file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the Build Cop Bot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" \ No newline at end of file diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh new file mode 100755 index 00000000..ff599eb2 --- /dev/null +++ b/scripts/decrypt-secrets.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
+PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + > testing/client-secrets.json \ No newline at end of file diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py new file mode 100644 index 00000000..d309d6e9 --- /dev/null +++ b/scripts/readme-gen/readme_gen.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Generates READMEs using configuration defined in yaml.""" + +import argparse +import io +import os +import subprocess + +import jinja2 +import yaml + + +jinja_env = jinja2.Environment( + trim_blocks=True, + loader=jinja2.FileSystemLoader( + os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + +README_TMPL = jinja_env.get_template('README.tmpl.rst') + + +def get_help(file): + return subprocess.check_output(['python', file, '--help']).decode() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('source') + parser.add_argument('--destination', default='README.rst') + + args = parser.parse_args() + + source = os.path.abspath(args.source) + root = os.path.dirname(source) + destination = os.path.join(root, args.destination) + + jinja_env.globals['get_help'] = get_help + + with io.open(source, 'r') as f: + config = yaml.load(f) + + # This allows get_help to execute in the right directory. + os.chdir(root) + + output = README_TMPL.render(config) + + with io.open(destination, 'w') as f: + f.write(output) + + +if __name__ == '__main__': + main() diff --git a/scripts/readme-gen/templates/README.tmpl.rst b/scripts/readme-gen/templates/README.tmpl.rst new file mode 100644 index 00000000..4fd23976 --- /dev/null +++ b/scripts/readme-gen/templates/README.tmpl.rst @@ -0,0 +1,87 @@ +{# The following line is a lie. BUT! Once jinja2 is done with it, it will + become truth! #} +.. This file is automatically generated. Do not edit this file directly. + +{{product.name}} Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst + + +This directory contains samples for {{product.name}}. {{product.description}} + +{{description}} + +.. 
_{{product.name}}: {{product.url}} + +{% if required_api_url %} +To run the sample, you need to enable the API at: {{required_api_url}} +{% endif %} + +{% if required_role %} +To run the sample, you need to have `{{required_role}}` role. +{% endif %} + +{{other_required_steps}} + +{% if setup %} +Setup +------------------------------------------------------------------------------- + +{% for section in setup %} + +{% include section + '.tmpl.rst' %} + +{% endfor %} +{% endif %} + +{% if samples %} +Samples +------------------------------------------------------------------------------- + +{% for sample in samples %} +{{sample.name}} ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +{% if not sample.hide_cloudshell_button %} +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst +{% endif %} + + +{{sample.description}} + +To run this sample: + +.. code-block:: bash + + $ python {{sample.file}} +{% if sample.show_help %} + + {{get_help(sample.file)|indent}} +{% endif %} + + +{% endfor %} +{% endif %} + +{% if cloud_client_library %} + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + +{% endif %} + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/scripts/readme-gen/templates/auth.tmpl.rst b/scripts/readme-gen/templates/auth.tmpl.rst new file mode 100644 index 00000000..1446b94a --- /dev/null +++ b/scripts/readme-gen/templates/auth.tmpl.rst @@ -0,0 +1,9 @@ +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started diff --git a/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/scripts/readme-gen/templates/auth_api_key.tmpl.rst new file mode 100644 index 00000000..11957ce2 --- /dev/null +++ b/scripts/readme-gen/templates/auth_api_key.tmpl.rst @@ -0,0 +1,14 @@ +Authentication +++++++++++++++ + +Authentication for this service is done via an `API Key`_. To obtain an API +Key: + +1. Open the `Cloud Platform Console`_ +2. Make sure that billing is enabled for your project. +3. From the **Credentials** page, create a new **API Key** or use an existing + one for your project. + +.. _API Key: + https://developers.google.com/api-client-library/python/guide/aaa_apikeys +.. _Cloud Console: https://console.cloud.google.com/project?_ diff --git a/scripts/readme-gen/templates/install_deps.tmpl.rst b/scripts/readme-gen/templates/install_deps.tmpl.rst new file mode 100644 index 00000000..a0406dba --- /dev/null +++ b/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -0,0 +1,29 @@ +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. 
You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ diff --git a/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/scripts/readme-gen/templates/install_portaudio.tmpl.rst new file mode 100644 index 00000000..5ea33d18 --- /dev/null +++ b/scripts/readme-gen/templates/install_portaudio.tmpl.rst @@ -0,0 +1,35 @@ +Install PortAudio ++++++++++++++++++ + +Install `PortAudio`_. This is required by the `PyAudio`_ library to stream +audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the +platform. + +* For Mac OS X, you can use `Homebrew`_:: + + brew install portaudio + + **Note**: if you encounter an error when running `pip install` that indicates + it can't find `portaudio.h`, try running `pip install` with the following + flags:: + + pip install --global-option='build_ext' \ + --global-option='-I/usr/local/include' \ + --global-option='-L/usr/local/lib' \ + pyaudio + +* For Debian / Ubuntu Linux:: + + apt-get install portaudio19-dev python-all-dev + +* Windows may work without having to install PortAudio explicitly (it will get + installed with PyAudio). + +For more details, see the `PyAudio installation`_ page. + + +.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/ +.. _PortAudio: http://www.portaudio.com/ +.. _PyAudio installation: + https://people.csail.mit.edu/hubert/pyaudio/#downloads +.. 
_Homebrew: http://brew.sh diff --git a/synth.metadata b/synth.metadata index 864a74d0..16e7fff3 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,7 +19,7 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "71b8a272549c06b5768d00fa48d3ae990e871bec" + "sha": "ffe10407ee2f261c799fb0d01bf32a8abc67ed1e" } } ], diff --git a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 00000000..b05fbd63 --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file From 79cf5ec2095ffccebeda095e2e5792ffd944d078 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:20:45 -0700 Subject: [PATCH 15/17] chore: update default ignores for Python libraries (#586) Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu May 28 14:43:37 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: e99975b6b49827b8720f0a885e218dbdb67849ca Source-Link: https://github.com/googleapis/synthtool/commit/e99975b6b49827b8720f0a885e218dbdb67849ca --- .flake8 | 2 ++ .gitignore | 2 ++ MANIFEST.in | 3 +++ synth.metadata | 2 +- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.flake8 b/.flake8 index 20fe9bda..ed931638 100644 --- a/.flake8 +++ b/.flake8 @@ -21,6 +21,8 @@ exclude = # Exclude generated code. **/proto/** **/gapic/** + **/services/** + **/types/** *_pb2.py # Standard linting exemptions. diff --git a/.gitignore b/.gitignore index 3fb06e09..b87e1ed5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ dist build eggs +.eggs parts bin var @@ -49,6 +50,7 @@ bigquery/docs/generated # Virtual environment env/ coverage.xml +sponge_log.xml # System test environment variables. 
system_tests/local_test_setup diff --git a/MANIFEST.in b/MANIFEST.in index 68855abc..e9e29d12 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -20,3 +20,6 @@ recursive-include google *.json *.proto recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen \ No newline at end of file diff --git a/synth.metadata b/synth.metadata index 16e7fff3..2458ad40 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,7 +19,7 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ffe10407ee2f261c799fb0d01bf32a8abc67ed1e" + "sha": "e99975b6b49827b8720f0a885e218dbdb67849ca" } } ], From 2033698ea81962ac9ff7c44c479fe3ae2d90fdd3 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 30 May 2020 10:20:45 -0700 Subject: [PATCH 16/17] feat: allow custom python versions in noxfile (#585) Libraries on the microgenerator support a smaller range of Python versions (3.6+). Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu May 28 18:22:04 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 4e1d2cb79b02d7496b1452f91c518630c207145e Source-Link: https://github.com/googleapis/synthtool/commit/4e1d2cb79b02d7496b1452f91c518630c207145e --- noxfile.py | 17 +++++++++-------- synth.metadata | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/noxfile.py b/noxfile.py index 67e8d7f4..020a25a7 100644 --- a/noxfile.py +++ b/noxfile.py @@ -26,11 +26,12 @@ BLACK_VERSION = "black==19.3b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -if os.path.exists("samples"): - BLACK_PATHS.append("samples") +DEFAULT_PYTHON_VERSION = "3.7" +SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.7"] +UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. 
@@ -56,7 +57,7 @@ def blacken(session): session.run("black", *BLACK_PATHS) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -84,13 +85,13 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") @@ -120,7 +121,7 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def cover(session): """Run the final coverage report. @@ -133,7 +134,7 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" diff --git a/synth.metadata b/synth.metadata index 2458ad40..c904376f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,7 +19,7 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "e99975b6b49827b8720f0a885e218dbdb67849ca" + "sha": "4e1d2cb79b02d7496b1452f91c518630c207145e" } } ], From 69b387f4d60cbfd6ec6c3075ea8670fbacc1423e Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Tue, 9 Jun 2020 20:20:37 +0000 Subject: [PATCH 17/17] docs: fix docs --- docs/gapic/v1/types.rst | 3 +- docs/gapic/v1beta2/types.rst | 3 +- docs/gapic/v1p1beta1/types.rst | 3 +- docs/gapic/v1p2beta1/types.rst | 3 +- docs/gapic/v1p3beta1/types.rst | 3 +- docs/index.rst | 2 + google/cloud/videointelligence_v1/__init__.py | 4 +- .../proto/video_intelligence_pb2.py | 40 ++++++++++++++ .../videointelligence_v1beta2/__init__.py | 4 +- 
.../proto/video_intelligence_pb2.py | 21 ++++++++ .../videointelligence_v1p1beta1/__init__.py | 4 +- .../proto/video_intelligence_pb2.py | 21 ++++++++ .../videointelligence_v1p2beta1/__init__.py | 4 +- .../proto/video_intelligence_pb2.py | 25 +++++++++ .../videointelligence_v1p3beta1/__init__.py | 4 +- .../proto/video_intelligence_pb2.py | 53 +++++++++++++++++++ synth.metadata | 10 ++-- synth.py | 17 ++++++ 18 files changed, 204 insertions(+), 20 deletions(-) diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst index d8508c28..d003b9ca 100644 --- a/docs/gapic/v1/types.rst +++ b/docs/gapic/v1/types.rst @@ -2,4 +2,5 @@ Types for Cloud Video Intelligence API Client ============================================= .. automodule:: google.cloud.videointelligence_v1.types - :members: \ No newline at end of file + :members: + :noindex: \ No newline at end of file diff --git a/docs/gapic/v1beta2/types.rst b/docs/gapic/v1beta2/types.rst index f782d4c9..4e1f5f6c 100644 --- a/docs/gapic/v1beta2/types.rst +++ b/docs/gapic/v1beta2/types.rst @@ -2,4 +2,5 @@ Types for Google Cloud Video Intelligence API Client ==================================================== .. automodule:: google.cloud.videointelligence_v1beta2.types - :members: \ No newline at end of file + :members: + :noindex: \ No newline at end of file diff --git a/docs/gapic/v1p1beta1/types.rst b/docs/gapic/v1p1beta1/types.rst index fc72d780..e18e529a 100644 --- a/docs/gapic/v1p1beta1/types.rst +++ b/docs/gapic/v1p1beta1/types.rst @@ -2,4 +2,5 @@ Types for Cloud Video Intelligence API Client ============================================= .. 
automodule:: google.cloud.videointelligence_v1p1beta1.types - :members: \ No newline at end of file + :members: + :noindex: \ No newline at end of file diff --git a/docs/gapic/v1p2beta1/types.rst b/docs/gapic/v1p2beta1/types.rst index 4806fa77..2db5249b 100644 --- a/docs/gapic/v1p2beta1/types.rst +++ b/docs/gapic/v1p2beta1/types.rst @@ -2,4 +2,5 @@ Types for Cloud Video Intelligence API Client ============================================= .. automodule:: google.cloud.videointelligence_v1p2beta1.types - :members: \ No newline at end of file + :members: + :noindex: \ No newline at end of file diff --git a/docs/gapic/v1p3beta1/types.rst b/docs/gapic/v1p3beta1/types.rst index 9f86eee1..e6f2094e 100644 --- a/docs/gapic/v1p3beta1/types.rst +++ b/docs/gapic/v1p3beta1/types.rst @@ -2,4 +2,5 @@ Types for Cloud Video Intelligence API Client ============================================= .. automodule:: google.cloud.videointelligence_v1p3beta1.types - :members: \ No newline at end of file + :members: + :noindex: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 5a888812..a7b14385 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,7 @@ .. include:: README.rst +.. include:: multiprocessing.rst + API Reference ------------- diff --git a/google/cloud/videointelligence_v1/__init__.py b/google/cloud/videointelligence_v1/__init__.py index 80ce76c6..a95658db 100644 --- a/google/cloud/videointelligence_v1/__init__.py +++ b/google/cloud/videointelligence_v1/__init__.py @@ -26,8 +26,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. 
" + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index 954049a6..916b8344 100644 --- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -3830,6 +3830,7 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. + Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -3879,6 +3880,7 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. + Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -3911,6 +3913,7 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. + Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -3953,6 +3956,7 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. + Attributes: model: Model to use for shot change detection. Supported values: @@ -3970,6 +3974,7 @@ "DESCRIPTOR": _OBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for OBJECT_TRACKING. + Attributes: model: Model to use for object tracking. 
Supported values: @@ -3987,6 +3992,7 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. + Attributes: model: Model to use for face detection. Supported values: @@ -4007,6 +4013,7 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model: Model to use for explicit content detection. Supported values: @@ -4024,6 +4031,7 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. + Attributes: language_hints: Language hint can be specified if the language to be detected @@ -4047,6 +4055,7 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment. + Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -4067,6 +4076,7 @@ "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for label detection. + Attributes: segment: Video segment where a label was detected. @@ -4085,6 +4095,7 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -4104,6 +4115,7 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. + Attributes: entity_id: Opaque entity ID. 
Some IDs may be available in `Google @@ -4126,6 +4138,7 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Label annotation. + Attributes: entity: Detected entity. @@ -4151,6 +4164,7 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -4172,6 +4186,7 @@ "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. + Attributes: frames: All video frames where explicit content was detected. @@ -4189,6 +4204,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. + Attributes: left: Left X coordinate. @@ -4211,6 +4227,7 @@ "DESCRIPTOR": _FACESEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for face detection. + Attributes: segment: Video segment where a face was detected. @@ -4227,6 +4244,7 @@ "DESCRIPTOR": _FACEFRAME, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for face detection. + Attributes: normalized_bounding_boxes: Normalized Bounding boxes in a frame. There can be more than @@ -4248,6 +4266,7 @@ "DESCRIPTOR": _FACEANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Face annotation. + Attributes: thumbnail: Thumbnail of a representative face view (in JPEG format). 
@@ -4269,6 +4288,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box. + Attributes: normalized_bounding_box: Normalized Bounding box in a frame, where the object is @@ -4293,6 +4313,7 @@ "DESCRIPTOR": _TRACK, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """A track of an object instance. + Attributes: segment: Video segment of a track. @@ -4316,6 +4337,7 @@ "DESCRIPTOR": _DETECTEDATTRIBUTE, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """A generic detected attribute represented by name in string format. + Attributes: name: The name of the attribute, i.e. glasses, dark_glasses, @@ -4340,6 +4362,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """A generic detected landmark represented by name in string format and a 2D location. + Attributes: name: The name of this landmark, i.e. left_hand, right_shoulder. @@ -4362,6 +4385,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. + Attributes: input_uri: Video file location in `Google Cloud Storage @@ -4432,6 +4456,7 @@ "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_results: Annotation results for all videos specified in @@ -4449,6 +4474,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. + Attributes: input_uri: Video file location in `Google Cloud Storage @@ -4481,6 +4507,7 @@ "__doc__": """Video annotation progress. 
Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_progress: Progress metadata for all videos specified in @@ -4498,6 +4525,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. + Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -4567,6 +4595,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Provides “hints” to the speech recognizer to favor specific words and phrases in the results. + Attributes: phrases: Optional. A list of strings containing words and phrases @@ -4590,6 +4619,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """A speech recognition result corresponding to a portion of the audio. + Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -4615,6 +4645,7 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript: Transcript text representing the words that the user spoke. @@ -4646,6 +4677,7 @@ "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. + Attributes: start_time: Time offset relative to the beginning of the audio, and @@ -4688,6 +4720,7 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """X coordinate. + Attributes: y: Y coordinate. @@ -4711,6 +4744,7 @@ becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). 
Note that values can be less than 0, or greater than 1 due to trignometric calculations for location of the box. + Attributes: vertices: Normalized vertices of the bounding polygon. @@ -4727,6 +4761,7 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. + Attributes: segment: Video segment where a text snippet was detected. @@ -4751,6 +4786,7 @@ "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. + Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. @@ -4771,6 +4807,7 @@ "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. + Attributes: text: The detected text. @@ -4790,6 +4827,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. + Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -4809,6 +4847,7 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. + Attributes: track_info: Different representation of tracking info in non-streaming @@ -4848,6 +4887,7 @@ "__module__": "google.cloud.videointelligence_v1.proto.video_intelligence_pb2", "__doc__": """Annotation corresponding to one detected, tracked and recognized logo class. 
+ Attributes: entity: Entity category information to specify the logo class that all diff --git a/google/cloud/videointelligence_v1beta2/__init__.py b/google/cloud/videointelligence_v1beta2/__init__.py index e6ed7610..4cf8a876 100644 --- a/google/cloud/videointelligence_v1beta2/__init__.py +++ b/google/cloud/videointelligence_v1beta2/__init__.py @@ -28,8 +28,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index 9c0ace0a..9ef7a5b3 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -1906,6 +1906,7 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video annotation request. + Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -1955,6 +1956,7 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. + Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -1981,6 +1983,7 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. 
+ Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -2007,6 +2010,7 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. + Attributes: model: Model to use for shot change detection. Supported values: @@ -2024,6 +2028,7 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model: Model to use for explicit content detection. Supported values: @@ -2041,6 +2046,7 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. + Attributes: model: Model to use for face detection. Supported values: @@ -2061,6 +2067,7 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video segment. + Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -2081,6 +2088,7 @@ "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for label detection. + Attributes: segment: Video segment where a label was detected. @@ -2099,6 +2107,7 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2118,6 +2127,7 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. + Attributes: entity_id: Opaque entity ID. 
Some IDs may be available in `Google @@ -2140,6 +2150,7 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Label annotation. + Attributes: entity: Detected entity. @@ -2165,6 +2176,7 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2186,6 +2198,7 @@ "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. + Attributes: frames: All video frames where explicit content was detected. @@ -2203,6 +2216,7 @@ "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. + Attributes: left: Left X coordinate. @@ -2225,6 +2239,7 @@ "DESCRIPTOR": _FACESEGMENT, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for face detection. + Attributes: segment: Video segment where a face was detected. @@ -2241,6 +2256,7 @@ "DESCRIPTOR": _FACEFRAME, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for face detection. + Attributes: normalized_bounding_boxes: Normalized Bounding boxes in a frame. There can be more than @@ -2262,6 +2278,7 @@ "DESCRIPTOR": _FACEANNOTATION, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Face annotation. + Attributes: thumbnail: Thumbnail of a representative face view (in JPEG format). 
@@ -2282,6 +2299,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. + Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2321,6 +2339,7 @@ "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_results: Annotation results for all videos specified in @@ -2338,6 +2357,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. + Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2364,6 +2384,7 @@ "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_progress: Progress metadata for all videos specified in diff --git a/google/cloud/videointelligence_v1p1beta1/__init__.py b/google/cloud/videointelligence_v1p1beta1/__init__.py index cfb54486..87d45dec 100644 --- a/google/cloud/videointelligence_v1p1beta1/__init__.py +++ b/google/cloud/videointelligence_v1p1beta1/__init__.py @@ -28,8 +28,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. 
" + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py index 4bbdc2cb..07b0d1e1 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py @@ -1952,6 +1952,7 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. + Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -2001,6 +2002,7 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. + Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -2027,6 +2029,7 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. + Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -2053,6 +2056,7 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. + Attributes: model: Model to use for shot change detection. Supported values: @@ -2070,6 +2074,7 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model: Model to use for explicit content detection. 
Supported values: @@ -2087,6 +2092,7 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. + Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -2107,6 +2113,7 @@ "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for label detection. + Attributes: segment: Video segment where a label was detected. @@ -2125,6 +2132,7 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2144,6 +2152,7 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. + Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -2166,6 +2175,7 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. + Attributes: entity: Detected entity. @@ -2191,6 +2201,7 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2212,6 +2223,7 @@ "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. + Attributes: frames: All video frames where explicit content was detected. 
@@ -2228,6 +2240,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. + Attributes: input_uri: Output only. Video file location in `Google Cloud Storage @@ -2266,6 +2279,7 @@ "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_results: Annotation results for all videos specified in @@ -2283,6 +2297,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. + Attributes: input_uri: Output only. Video file location in `Google Cloud Storage @@ -2309,6 +2324,7 @@ "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_progress: Progress metadata for all videos specified in @@ -2326,6 +2342,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. + Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -2377,6 +2394,7 @@ "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Provides “hints” to the speech recognizer to favor specific words and phrases in the results. + Attributes: phrases: Optional. A list of strings containing words and phrases @@ -2400,6 +2418,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """A speech recognition result corresponding to a portion of the audio. 
+ Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -2420,6 +2439,7 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript: Output only. Transcript text representing the words that the @@ -2450,6 +2470,7 @@ "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. + Attributes: start_time: Output only. Time offset relative to the beginning of the diff --git a/google/cloud/videointelligence_v1p2beta1/__init__.py b/google/cloud/videointelligence_v1p2beta1/__init__.py index 65d0bec6..12809a17 100644 --- a/google/cloud/videointelligence_v1p2beta1/__init__.py +++ b/google/cloud/videointelligence_v1p2beta1/__init__.py @@ -28,8 +28,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index 40b8d60b..08aad8b8 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -2208,6 +2208,7 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. 
+ Attributes: input_uri: Input video location. Currently, only `Google Cloud Storage @@ -2257,6 +2258,7 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. + Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -2283,6 +2285,7 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. + Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -2309,6 +2312,7 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. + Attributes: model: Model to use for shot change detection. Supported values: @@ -2326,6 +2330,7 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model: Model to use for explicit content detection. Supported values: @@ -2343,6 +2348,7 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. + Attributes: language_hints: Language hint can be specified if the language to be detected @@ -2363,6 +2369,7 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. + Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -2383,6 +2390,7 @@ "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for label detection. 
+ Attributes: segment: Video segment where a label was detected. @@ -2401,6 +2409,7 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2420,6 +2429,7 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. + Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -2442,6 +2452,7 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. + Attributes: entity: Detected entity. @@ -2467,6 +2478,7 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -2488,6 +2500,7 @@ "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. + Attributes: frames: All video frames where explicit content was detected. @@ -2505,6 +2518,7 @@ "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. + Attributes: left: Left X coordinate. @@ -2527,6 +2541,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. 
+ Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2569,6 +2584,7 @@ "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_results: Annotation results for all videos specified in @@ -2586,6 +2602,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. + Attributes: input_uri: Video file location in `Google Cloud Storage @@ -2612,6 +2629,7 @@ "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_progress: Progress metadata for all videos specified in @@ -2629,6 +2647,7 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """X coordinate. + Attributes: y: Y coordinate. @@ -2652,6 +2671,7 @@ becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trignometric calculations for location of the box. + Attributes: vertices: Normalized vertices of the bounding polygon. @@ -2668,6 +2688,7 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. + Attributes: segment: Video segment where a text snippet was detected. @@ -2692,6 +2713,7 @@ "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. + Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. 
@@ -2712,6 +2734,7 @@ "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. + Attributes: text: The detected text. @@ -2731,6 +2754,7 @@ "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. + Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -2750,6 +2774,7 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. + Attributes: entity: Entity to specify the object category that this track is diff --git a/google/cloud/videointelligence_v1p3beta1/__init__.py b/google/cloud/videointelligence_v1p3beta1/__init__.py index 75bb4ebe..2b0d752b 100644 --- a/google/cloud/videointelligence_v1p3beta1/__init__.py +++ b/google/cloud/videointelligence_v1p3beta1/__init__.py @@ -31,8 +31,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. 
" + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index be79a2b4..db1ad7de 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -5139,6 +5139,7 @@ "DESCRIPTOR": _ANNOTATEVIDEOREQUEST, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video annotation request. + Attributes: input_uri: Input video location. Currently, only `Cloud Storage @@ -5188,6 +5189,7 @@ "DESCRIPTOR": _VIDEOCONTEXT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video context and/or feature-specific parameters. + Attributes: segments: Video segments to annotate. The segments may overlap and are @@ -5222,6 +5224,7 @@ "DESCRIPTOR": _LABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for LABEL_DETECTION. + Attributes: label_detection_mode: What labels should be detected with LABEL_DETECTION, in @@ -5263,6 +5266,7 @@ "DESCRIPTOR": _SHOTCHANGEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for SHOT_CHANGE_DETECTION. + Attributes: model: Model to use for shot change detection. Supported values: @@ -5280,6 +5284,7 @@ "DESCRIPTOR": _OBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for OBJECT_TRACKING. + Attributes: model: Model to use for object tracking. 
Supported values: @@ -5297,6 +5302,7 @@ "DESCRIPTOR": _EXPLICITCONTENTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model: Model to use for explicit content detection. Supported values: @@ -5314,6 +5320,7 @@ "DESCRIPTOR": _FACEDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for FACE_DETECTION. + Attributes: model: Model to use for face detection. Supported values: @@ -5338,6 +5345,7 @@ "DESCRIPTOR": _PERSONDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for PERSON_DETECTION. + Attributes: include_bounding_boxes: Whether bounding boxes are included in the person detection @@ -5363,6 +5371,7 @@ "DESCRIPTOR": _TEXTDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for TEXT_DETECTION. + Attributes: language_hints: Language hint can be specified if the language to be detected @@ -5386,6 +5395,7 @@ "DESCRIPTOR": _VIDEOSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video segment. + Attributes: start_time_offset: Time-offset, relative to the beginning of the video, @@ -5406,6 +5416,7 @@ "DESCRIPTOR": _LABELSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for label detection. + Attributes: segment: Video segment where a label was detected. @@ -5424,6 +5435,7 @@ "DESCRIPTOR": _LABELFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for label detection. 
+ Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -5443,6 +5455,7 @@ "DESCRIPTOR": _ENTITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Detected entity from video analysis. + Attributes: entity_id: Opaque entity ID. Some IDs may be available in `Google @@ -5465,6 +5478,7 @@ "DESCRIPTOR": _LABELANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Label annotation. + Attributes: entity: Detected entity. @@ -5490,6 +5504,7 @@ "DESCRIPTOR": _EXPLICITCONTENTFRAME, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotation results for explicit content. + Attributes: time_offset: Time-offset, relative to the beginning of the video, @@ -5511,6 +5526,7 @@ "__doc__": """Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame. + Attributes: frames: All video frames where explicit content was detected. @@ -5528,6 +5544,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1]. + Attributes: left: Left X coordinate. @@ -5551,6 +5568,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box. + Attributes: normalized_bounding_box: Normalized Bounding box in a frame, where the object is @@ -5575,6 +5593,7 @@ "DESCRIPTOR": _TRACK, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """A track of an object instance. + Attributes: segment: Video segment of a track. 
@@ -5598,6 +5617,7 @@ "DESCRIPTOR": _DETECTEDATTRIBUTE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """A generic detected attribute represented by name in string format. + Attributes: name: The name of the attribute, for example, glasses, dark_glasses, @@ -5621,6 +5641,7 @@ "DESCRIPTOR": _CELEBRITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Celebrity definition. + Attributes: name: The resource name of the celebrity. Have the format ``video- @@ -5649,6 +5670,7 @@ "DESCRIPTOR": _CELEBRITYTRACK_RECOGNIZEDCELEBRITY, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """The recognized celebrity with confidence score. + Attributes: celebrity: The recognized celebrity. @@ -5663,6 +5685,7 @@ "__doc__": """The annotation result of a celebrity face track. RecognizedCelebrity field could be empty if the face track does not have any matched celebrities. + Attributes: celebrities: Top N match of the celebrities for the face in this track. @@ -5682,6 +5705,7 @@ "DESCRIPTOR": _CELEBRITYRECOGNITIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Celebrity recognition annotation per video. + Attributes: celebrity_tracks: The tracks detected from the input video, including recognized @@ -5700,6 +5724,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """A generic detected landmark represented by name in string format and a 2D location. + Attributes: name: The name of this landmark, for example, left_hand, @@ -5723,6 +5748,7 @@ "DESCRIPTOR": _FACEDETECTIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Face detection annotation. + Attributes: tracks: The face tracks with attributes. 
@@ -5741,6 +5767,7 @@ "DESCRIPTOR": _PERSONDETECTIONANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Person detection annotation per video. + Attributes: tracks: The detected tracks of a person. @@ -5757,6 +5784,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONRESULTS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotation results for a single video. + Attributes: input_uri: Video file location in `Cloud Storage @@ -5830,6 +5858,7 @@ "__doc__": """Video annotation response. Included in the ``response`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_results: Annotation results for all videos specified in @@ -5847,6 +5876,7 @@ "DESCRIPTOR": _VIDEOANNOTATIONPROGRESS, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotation progress for a single video. + Attributes: input_uri: Video file location in `Cloud Storage @@ -5879,6 +5909,7 @@ "__doc__": """Video annotation progress. Included in the ``metadata`` field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. + Attributes: annotation_progress: Progress metadata for all videos specified in @@ -5896,6 +5927,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for SPEECH_TRANSCRIPTION. + Attributes: language_code: Required. *Required* The language of the supplied audio as a @@ -5965,6 +5997,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Provides “hints” to the speech recognizer to favor specific words and phrases in the results. + Attributes: phrases: Optional. 
A list of strings containing words and phrases @@ -5988,6 +6021,7 @@ "DESCRIPTOR": _SPEECHTRANSCRIPTION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """A speech recognition result corresponding to a portion of the audio. + Attributes: alternatives: May contain one or more recognition hypotheses (up to the @@ -6013,6 +6047,7 @@ "DESCRIPTOR": _SPEECHRECOGNITIONALTERNATIVE, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript: Transcript text representing the words that the user spoke. @@ -6044,6 +6079,7 @@ "__doc__": """Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as ``enable_word_time_offsets``. + Attributes: start_time: Time offset relative to the beginning of the audio, and @@ -6086,6 +6122,7 @@ "DESCRIPTOR": _NORMALIZEDVERTEX, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """X coordinate. + Attributes: y: Y coordinate. @@ -6109,6 +6146,7 @@ becomes: 2—-3 \| \| 1—-0 and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trignometric calculations for location of the box. + Attributes: vertices: Normalized vertices of the bounding polygon. @@ -6125,6 +6163,7 @@ "DESCRIPTOR": _TEXTSEGMENT, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video segment level annotation results for text detection. + Attributes: segment: Video segment where a text snippet was detected. @@ -6149,6 +6188,7 @@ "__doc__": """Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets. 
+ Attributes: rotated_bounding_box: Bounding polygon of the detected text for this frame. @@ -6169,6 +6209,7 @@ "__doc__": """Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection. + Attributes: text: The detected text. @@ -6188,6 +6229,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence. + Attributes: normalized_bounding_box: The normalized bounding box location of this object track for @@ -6207,6 +6249,7 @@ "DESCRIPTOR": _OBJECTTRACKINGANNOTATION, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotations corresponding to one tracked object. + Attributes: track_info: Different representation of tracking info in non-streaming @@ -6246,6 +6289,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Annotation corresponding to one detected, tracked and recognized logo class. + Attributes: entity: Entity category information to specify the logo class that all @@ -6276,6 +6320,7 @@ ``StreamingAnnotateVideoRequest`` messages are sent. The first message must only contain a ``StreamingVideoConfig`` message. All subsequent messages must only contain ``input_content`` data. + Attributes: streaming_request: \ *Required* The streaming request, which is either a @@ -6307,6 +6352,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Provides information to the annotator that specifies how to process the request. + Attributes: streaming_config: Config for requested annotation feature. @@ -6344,6 +6390,7 @@ the client by ``StreamingAnnotateVideo``. 
A series of zero or more ``StreamingAnnotateVideoResponse`` messages are streamed back to the client. + Attributes: error: If set, returns a [google.rpc.Status][google.rpc.Status] @@ -6369,6 +6416,7 @@ "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Streaming annotation results corresponding to a portion of the video that is currently being processed. + Attributes: shot_annotations: Shot annotation results. Each shot is represented as a video @@ -6404,6 +6452,7 @@ "DESCRIPTOR": _STREAMINGLABELDETECTIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_LABEL_DETECTION. + Attributes: stationary_camera: Whether the video has been captured from a stationary @@ -6446,6 +6495,7 @@ "DESCRIPTOR": _STREAMINGAUTOMLACTIONRECOGNITIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_ACTION_RECOGNITION. + Attributes: model_name: Resource name of AutoML model. Format: ``projects/{project_id} @@ -6463,6 +6513,7 @@ "DESCRIPTOR": _STREAMINGAUTOMLCLASSIFICATIONCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_CLASSIFICATION. + Attributes: model_name: Resource name of AutoML model. Format: ``projects/{project_num @@ -6480,6 +6531,7 @@ "DESCRIPTOR": _STREAMINGAUTOMLOBJECTTRACKINGCONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for STREAMING_AUTOML_OBJECT_TRACKING. + Attributes: model_name: Resource name of AutoML model. Format: ``projects/{project_id} @@ -6497,6 +6549,7 @@ "DESCRIPTOR": _STREAMINGSTORAGECONFIG, "__module__": "google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", "__doc__": """Config for streaming storage option. + Attributes: enable_storage_annotation_result: Enable streaming storage. 
Default: false. diff --git a/synth.metadata b/synth.metadata index c904376f..d169eabf 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,23 +3,23 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-videointelligence.git", - "sha": "ce1e7defb7597f5afd3eb22b23259407382e5faa" + "remote": "https://github.com/googleapis/python-videointelligence.git", + "sha": "2033698ea81962ac9ff7c44c479fe3ae2d90fdd3" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "eafa840ceec23b44a5c21670288107c661252711", - "internalRef": "313488995" + "sha": "148a3caa5f6735c49d57bb4c8001a276fcfad588", + "internalRef": "315516848" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4e1d2cb79b02d7496b1452f91c518630c207145e" + "sha": "88645a8f6e1b1c90fed625158960ffec565b023a" } } ], diff --git a/synth.py b/synth.py index 7705877c..5599a61b 100644 --- a/synth.py +++ b/synth.py @@ -111,6 +111,23 @@ "response = client\.annotate_video\(features, input_uri=input_uri\)", "response = client.annotate_video(input_uri=input_uri, features=features)") +# Add missing blank line before Attributes: in generated docstrings +# Remove after +# https://github.com/googleapis/protoc-docs-plugin/pull/31 +s.replace( + "google/cloud/**/*_pb2.py", + "(\s+)Attributes:", + "\n\g<1>Attributes:" +) + +# Add noindex to types docs to silence warnings about duplicates +# TODO: Remove during microgenerator transition +s.replace( + "docs/gapic/**/types.rst", + "(\s+):members:", + "\g<1>:members:\g<1>:noindex:" +) + # ---------------------------------------------------------------------------- # Add templated files # ----------------------------------------------------------------------------