diff --git a/packages/google-cloud-vision/.cloud-repo-tools.json b/packages/google-cloud-vision/.cloud-repo-tools.json index 354f8b8dacb..0244ee6c2cd 100644 --- a/packages/google-cloud-vision/.cloud-repo-tools.json +++ b/packages/google-cloud-vision/.cloud-repo-tools.json @@ -11,6 +11,13 @@ "file": "detect.js", "docs_link": "https://cloud.google.com/vision/docs", "usage": "node detect.js --help" + }, + { + "id": "detect.v1p1beta1.js", + "name": "Detection samples for Beta API", + "file": "detect.v1p1beta1.js", + "docs_link": "https://cloud.google.com/vision/docs", + "usage": "node detect.v1p1beta1.js --help" } ] } diff --git a/packages/google-cloud-vision/CONTRIBUTORS b/packages/google-cloud-vision/CONTRIBUTORS index caefac6e083..96b20b806d0 100644 --- a/packages/google-cloud-vision/CONTRIBUTORS +++ b/packages/google-cloud-vision/CONTRIBUTORS @@ -15,4 +15,5 @@ Song Wang Stephen Sawchuk Tim Swast calibr +Rebecca Taylor rtw diff --git a/packages/google-cloud-vision/README.md b/packages/google-cloud-vision/README.md index 50f9d32e591..d04cca858d4 100644 --- a/packages/google-cloud-vision/README.md +++ b/packages/google-cloud-vision/README.md @@ -91,6 +91,7 @@ has instructions for running the samples. | Sample | Source Code | Try it | | --------------------------- | --------------------------------- | ------ | | Detection samples | [source code](https://github.com/googleapis/nodejs-vision/blob/master/samples/detect.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/detect.js,samples/README.md) | +| Detection samples for Beta API | [source code](https://github.com/googleapis/nodejs-vision/blob/master/samples/detect.v1p1beta1.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/detect.v1p1beta1.js,samples/README.md) | The [Vision API Node.js Client API Reference][client-docs] documentation also contains samples. diff --git a/packages/google-cloud-vision/package.json b/packages/google-cloud-vision/package.json index 7d9e994bbbf..e2a4d7b2d3c 100644 --- a/packages/google-cloud-vision/package.json +++ b/packages/google-cloud-vision/package.json @@ -41,6 +41,7 @@ "Stephen Sawchuk ", "Tim Swast ", "calibr ", + "Rebecca Taylor ", "rtw " ], "scripts": { diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/geometry.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/geometry.proto new file mode 100644 index 00000000000..6d46d9c342e --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/geometry.proto @@ -0,0 +1,53 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.vision.v1p1beta1; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision"; +option java_multiple_files = true; +option java_outer_classname = "GeometryProto"; +option java_package = "com.google.cloud.vision.v1p1beta1"; + +// A vertex represents a 2D point in the image. +// NOTE: the vertex coordinates are in the same scale as the original image. +message Vertex { + // X coordinate. + int32 x = 1; + + // Y coordinate. + int32 y = 2; +} + +// A bounding polygon for the detected image annotation. +message BoundingPoly { + // The bounding polygon vertices. + repeated Vertex vertices = 1; +} + +// A 3D position in the image, used primarily for Face detection landmarks. +// A valid Position must have both x and y coordinates. +// The position coordinates are in the same scale as the original image. +message Position { + // X coordinate. + float x = 1; + + // Y coordinate. + float y = 2; + + // Z coordinate (or depth). + float z = 3; +} diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/image_annotator.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/image_annotator.proto new file mode 100644 index 00000000000..73b8db853aa --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/image_annotator.proto @@ -0,0 +1,591 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.vision.v1p1beta1; + +import "google/api/annotations.proto"; +import "google/cloud/vision/v1p1beta1/geometry.proto"; +import "google/cloud/vision/v1p1beta1/text_annotation.proto"; +import "google/cloud/vision/v1p1beta1/web_detection.proto"; +import "google/rpc/status.proto"; +import "google/type/color.proto"; +import "google/type/latlng.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision"; +option java_multiple_files = true; +option java_outer_classname = "ImageAnnotatorProto"; +option java_package = "com.google.cloud.vision.v1p1beta1"; + +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +service ImageAnnotator { + // Run image detection and annotation for a batch of images. + rpc BatchAnnotateImages(BatchAnnotateImagesRequest) + returns (BatchAnnotateImagesResponse) { + option (google.api.http) = { + post: "/v1p1beta1/images:annotate" + body: "*" + }; + } +} + +// Users describe the type of Google Cloud Vision API tasks to perform over +// images by using *Feature*s. Each Feature indicates a type of image +// detection task to perform. Features encode the Cloud Vision API +// vertical to operate on and the number of top-scoring results to return. +message Feature { + // Type of image feature. 
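+  // For example, a `LABEL_DETECTION` feature paired with `max_results = 5`
+  // (see the fields below) asks the service for the five top-scoring
+  // labels; this pairing is illustrative, not required by the API.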
+ enum Type { + // Unspecified feature type. + TYPE_UNSPECIFIED = 0; + + // Run face detection. + FACE_DETECTION = 1; + + // Run landmark detection. + LANDMARK_DETECTION = 2; + + // Run logo detection. + LOGO_DETECTION = 3; + + // Run label detection. + LABEL_DETECTION = 4; + + // Run OCR. + TEXT_DETECTION = 5; + + // Run dense text document OCR. Takes precedence when both + // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + DOCUMENT_TEXT_DETECTION = 11; + + // Run computer vision models to compute image safe-search properties. + SAFE_SEARCH_DETECTION = 6; + + // Compute a set of image properties, such as the image's dominant colors. + IMAGE_PROPERTIES = 7; + + // Run crop hints. + CROP_HINTS = 9; + + // Run web detection. + WEB_DETECTION = 10; + } + + // The feature type. + Type type = 1; + + // Maximum number of results of this type. + int32 max_results = 2; + + // Model to use for the feature. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// External image source (Google Cloud Storage image location). +message ImageSource { + // NOTE: For new code `image_uri` below is preferred. + // Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + string gcs_image_uri = 1; + + // Image URI which supports: + // 1) Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + // 2) Publicly accessible image HTTP/HTTPS URL. + // This is preferred over the legacy `gcs_image_uri` above. When both + // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + // precedence. + string image_uri = 2; +} + +// Client image to perform Google Cloud Vision API tasks over. +message Image { + // Image content, represented as a stream of bytes. + // Note: as with all `bytes` fields, protobuffers use a pure binary + // representation, whereas JSON representations use base64. + bytes content = 1; + + // Google Cloud Storage image location. If both `content` and `source` + // are provided for an image, `content` takes precedence and is + // used to perform the image annotation request. + ImageSource source = 2; +} + +// A face annotation object contains the results of face detection. +message FaceAnnotation { + // A face-specific landmark (for example, a face feature). + message Landmark { + // Face landmark (feature) type. + // Left and right are defined from the vantage of the viewer of the image + // without considering mirror projections typical of photos. So, `LEFT_EYE`, + // typically, is the person's right eye. + enum Type { + // Unknown face landmark detected. Should not be filled. + UNKNOWN_LANDMARK = 0; + + // Left eye. + LEFT_EYE = 1; + + // Right eye. + RIGHT_EYE = 2; + + // Left of left eyebrow. + LEFT_OF_LEFT_EYEBROW = 3; + + // Right of left eyebrow. + RIGHT_OF_LEFT_EYEBROW = 4; + + // Left of right eyebrow. + LEFT_OF_RIGHT_EYEBROW = 5; + + // Right of right eyebrow. + RIGHT_OF_RIGHT_EYEBROW = 6; + + // Midpoint between eyes. + MIDPOINT_BETWEEN_EYES = 7; + + // Nose tip. + NOSE_TIP = 8; + + // Upper lip. + UPPER_LIP = 9; + + // Lower lip. 
+ LOWER_LIP = 10; + + // Mouth left. + MOUTH_LEFT = 11; + + // Mouth right. + MOUTH_RIGHT = 12; + + // Mouth center. + MOUTH_CENTER = 13; + + // Nose, bottom right. + NOSE_BOTTOM_RIGHT = 14; + + // Nose, bottom left. + NOSE_BOTTOM_LEFT = 15; + + // Nose, bottom center. + NOSE_BOTTOM_CENTER = 16; + + // Left eye, top boundary. + LEFT_EYE_TOP_BOUNDARY = 17; + + // Left eye, right corner. + LEFT_EYE_RIGHT_CORNER = 18; + + // Left eye, bottom boundary. + LEFT_EYE_BOTTOM_BOUNDARY = 19; + + // Left eye, left corner. + LEFT_EYE_LEFT_CORNER = 20; + + // Right eye, top boundary. + RIGHT_EYE_TOP_BOUNDARY = 21; + + // Right eye, right corner. + RIGHT_EYE_RIGHT_CORNER = 22; + + // Right eye, bottom boundary. + RIGHT_EYE_BOTTOM_BOUNDARY = 23; + + // Right eye, left corner. + RIGHT_EYE_LEFT_CORNER = 24; + + // Left eyebrow, upper midpoint. + LEFT_EYEBROW_UPPER_MIDPOINT = 25; + + // Right eyebrow, upper midpoint. + RIGHT_EYEBROW_UPPER_MIDPOINT = 26; + + // Left ear tragion. + LEFT_EAR_TRAGION = 27; + + // Right ear tragion. + RIGHT_EAR_TRAGION = 28; + + // Left eye pupil. + LEFT_EYE_PUPIL = 29; + + // Right eye pupil. + RIGHT_EYE_PUPIL = 30; + + // Forehead glabella. + FOREHEAD_GLABELLA = 31; + + // Chin gnathion. + CHIN_GNATHION = 32; + + // Chin left gonion. + CHIN_LEFT_GONION = 33; + + // Chin right gonion. + CHIN_RIGHT_GONION = 34; + } + + // Face landmark type. + Type type = 3; + + // Face landmark position. + Position position = 4; + } + + // The bounding polygon around the face. The coordinates of the bounding box + // are in the original image's scale, as returned in `ImageParams`. + // The bounding box is computed to "frame" the face in accordance with human + // expectations. It is based on the landmarker results. + // Note that one or more x and/or y coordinates may not be generated in the + // `BoundingPoly` (the polygon will be unbounded) if only a partial face + // appears in the image to be annotated. + BoundingPoly bounding_poly = 1; + + // The `fd_bounding_poly` bounding polygon is tighter than the + // `boundingPoly`, and encloses only the skin part of the face. Typically, it + // is used to eliminate the face from any image analysis that detects the + // "amount of skin" visible in an image. It is not based on the + // landmarker results, only on the initial face detection, hence + // the fd (face detection) prefix. + BoundingPoly fd_bounding_poly = 2; + + // Detected face landmarks. + repeated Landmark landmarks = 3; + + // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + // of the face relative to the image vertical about the axis perpendicular to + // the face. Range [-180,180]. + float roll_angle = 4; + + // Yaw angle, which indicates the leftward/rightward angle that the face is + // pointing relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + float pan_angle = 5; + + // Pitch angle, which indicates the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + float tilt_angle = 6; + + // Detection confidence. Range [0, 1]. + float detection_confidence = 7; + + // Face landmarking confidence. Range [0, 1]. + float landmarking_confidence = 8; + + // Joy likelihood. + Likelihood joy_likelihood = 9; + + // Sorrow likelihood. + Likelihood sorrow_likelihood = 10; + + // Anger likelihood. + Likelihood anger_likelihood = 11; + + // Surprise likelihood. + Likelihood surprise_likelihood = 12; + + // Under-exposed likelihood. 
+  Likelihood under_exposed_likelihood = 13;
+
+  // Blurred likelihood.
+  Likelihood blurred_likelihood = 14;
+
+  // Headwear likelihood.
+  Likelihood headwear_likelihood = 15;
+}
+
+// Detected entity location information.
+message LocationInfo {
+  // lat/long location coordinates.
+  google.type.LatLng lat_lng = 1;
+}
+
+// A `Property` consists of a user-supplied name/value pair.
+message Property {
+  // Name of the property.
+  string name = 1;
+
+  // Value of the property.
+  string value = 2;
+
+  // Value of numeric properties.
+  uint64 uint64_value = 3;
+}
+
+// Set of detected entity features.
+message EntityAnnotation {
+  // Opaque entity ID. Some IDs may be available in
+  // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+  string mid = 1;
+
+  // The language code for the locale in which the entity textual
+  // `description` is expressed.
+  string locale = 2;
+
+  // Entity textual description, expressed in its `locale` language.
+  string description = 3;
+
+  // Overall score of the result. Range [0, 1].
+  float score = 4;
+
+  // The accuracy of the entity detection in an image.
+  // For example, for an image in which the "Eiffel Tower" entity is detected,
+  // this field represents the confidence that there is a tower in the query
+  // image. Range [0, 1].
+  float confidence = 5;
+
+  // The relevancy of the ICA (Image Content Annotation) label to the
+  // image. For example, the relevancy of "tower" is likely higher to an image
+  // containing the detected "Eiffel Tower" than to an image containing a
+  // detected distant towering building, even though the confidence that
+  // there is a tower in each image may be the same. Range [0, 1].
+  float topicality = 6;
+
+  // Image region to which this entity belongs. Not produced
+  // for `LABEL_DETECTION` features.
+  BoundingPoly bounding_poly = 7;
+
+  // The location information for the detected entity. Multiple
+  // `LocationInfo` elements can be present because one location may
+  // indicate the location of the scene in the image, and another location
+  // may indicate the location of the place where the image was taken.
+  // Location information is usually present for landmarks.
+  repeated LocationInfo locations = 8;
+
+  // Some entities may have optional user-supplied `Property` (name/value)
+  // fields, such as a score or string that qualifies the entity.
+  repeated Property properties = 9;
+}
+
+// Set of features pertaining to the image, computed by computer vision
+// methods over safe-search verticals (for example, adult, spoof, medical,
+// violence).
+message SafeSearchAnnotation {
+  // Represents the adult content likelihood for the image. Adult content may
+  // contain elements such as nudity, pornographic images or cartoons, or
+  // sexual activities.
+  Likelihood adult = 1;
+
+  // Spoof likelihood. The likelihood that a modification
+  // was made to the image's canonical version to make it appear
+  // funny or offensive.
+  Likelihood spoof = 2;
+
+  // Likelihood that this is a medical image.
+  Likelihood medical = 3;
+
+  // Likelihood that this image contains violent content.
+  Likelihood violence = 4;
+
+  // Likelihood that the request image contains racy content. Racy content may
+  // include (but is not limited to) skimpy or sheer clothing, strategically
+  // covered nudity, lewd or provocative poses, or close-ups of sensitive
+  // body areas.
+  Likelihood racy = 9;
+}
+
+// Rectangle determined by min and max `LatLng` pairs.
+message LatLongRect {
+  // Min lat/long pair.
+ google.type.LatLng min_lat_lng = 1; + + // Max lat/long pair. + google.type.LatLng max_lat_lng = 2; +} + +// Color information consists of RGB channels, score, and the fraction of +// the image that the color occupies in the image. +message ColorInfo { + // RGB components of the color. + google.type.Color color = 1; + + // Image-specific score for this color. Value in range [0, 1]. + float score = 2; + + // The fraction of pixels the color occupies in the image. + // Value in range [0, 1]. + float pixel_fraction = 3; +} + +// Set of dominant colors and their corresponding scores. +message DominantColorsAnnotation { + // RGB color values with their score and pixel fraction. + repeated ColorInfo colors = 1; +} + +// Stores image properties, such as dominant colors. +message ImageProperties { + // If present, dominant colors completed successfully. + DominantColorsAnnotation dominant_colors = 1; +} + +// Single crop hint that is used to generate a new crop when serving an image. +message CropHint { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly bounding_poly = 1; + + // Confidence of this being a salient region. Range [0, 1]. + float confidence = 2; + + // Fraction of importance of this salient region with respect to the original + // image. + float importance_fraction = 3; +} + +// Set of crop hints that are used to generate new crops when serving images. +message CropHintsAnnotation { + // Crop hint results. + repeated CropHint crop_hints = 1; +} + +// Parameters for crop hints annotation request. +message CropHintsParams { + // Aspect ratios in floats, representing the ratio of the width to the height + // of the image. For example, if the desired aspect ratio is 4/3, the + // corresponding float value should be 1.33333. If not specified, the + // best possible crop is returned. The number of provided aspect ratios is + // limited to a maximum of 16; any aspect ratios provided after the 16th are + // ignored. + repeated float aspect_ratios = 1; +} + +// Parameters for web detection request. +message WebDetectionParams { + // Whether to include results derived from the geo information in the image. + bool include_geo_results = 2; +} + +// Image context and/or feature-specific parameters. +message ImageContext { + // lat/long rectangle that specifies the location of the image. + LatLongRect lat_long_rect = 1; + + // List of languages to use for TEXT_DETECTION. In most cases, an empty value + // yields the best results since it enables automatic language detection. For + // languages based on the Latin alphabet, setting `language_hints` is not + // needed. In rare cases, when the language of the text in the image is known, + // setting a hint will help get better results (although it will be a + // significant hindrance if the hint is wrong). Text detection returns an + // error if one or more of the specified languages is not one of the + // [supported languages](/vision/docs/languages). + repeated string language_hints = 2; + + // Parameters for crop hints annotation request. + CropHintsParams crop_hints_params = 4; + + // Parameters for web detection. + WebDetectionParams web_detection_params = 6; +} + +// Request for performing Google Cloud Vision API tasks over a user-provided +// image, with user-requested features. +message AnnotateImageRequest { + // The image to be processed. + Image image = 1; + + // Requested features. 
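+  // For example, a single request may combine a `LABEL_DETECTION` feature
+  // with a `SAFE_SEARCH_DETECTION` feature so that both detections run in
+  // one call (illustrative; any feature types may be combined).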
+ repeated Feature features = 2; + + // Additional context that may accompany the image. + ImageContext image_context = 3; +} + +// Response to an image annotation request. +message AnnotateImageResponse { + // If present, face detection has completed successfully. + repeated FaceAnnotation face_annotations = 1; + + // If present, landmark detection has completed successfully. + repeated EntityAnnotation landmark_annotations = 2; + + // If present, logo detection has completed successfully. + repeated EntityAnnotation logo_annotations = 3; + + // If present, label detection has completed successfully. + repeated EntityAnnotation label_annotations = 4; + + // If present, text (OCR) detection has completed successfully. + repeated EntityAnnotation text_annotations = 5; + + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + // This annotation provides the structural hierarchy for the OCR detected + // text. + TextAnnotation full_text_annotation = 12; + + // If present, safe-search annotation has completed successfully. + SafeSearchAnnotation safe_search_annotation = 6; + + // If present, image properties were extracted successfully. + ImageProperties image_properties_annotation = 8; + + // If present, crop hints have completed successfully. + CropHintsAnnotation crop_hints_annotation = 11; + + // If present, web detection has completed successfully. + WebDetection web_detection = 13; + + // If set, represents the error message for the operation. + // Note that filled-in image annotations are guaranteed to be + // correct, even when `error` is set. + google.rpc.Status error = 9; +} + +// Multiple image annotation requests are batched into a single service call. +message BatchAnnotateImagesRequest { + // Individual image annotation requests for this batch. + repeated AnnotateImageRequest requests = 1; +} + +// Response to a batch image annotation request. +message BatchAnnotateImagesResponse { + // Individual responses to image annotation requests within the batch. + repeated AnnotateImageResponse responses = 1; +} + +// A bucketized representation of likelihood, which is intended to give clients +// highly stable results across model upgrades. +enum Likelihood { + // Unknown likelihood. + UNKNOWN = 0; + + // It is very unlikely that the image belongs to the specified vertical. + VERY_UNLIKELY = 1; + + // It is unlikely that the image belongs to the specified vertical. + UNLIKELY = 2; + + // It is possible that the image belongs to the specified vertical. + POSSIBLE = 3; + + // It is likely that the image belongs to the specified vertical. + LIKELY = 4; + + // It is very likely that the image belongs to the specified vertical. + VERY_LIKELY = 5; +} diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/text_annotation.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/text_annotation.proto new file mode 100644 index 00000000000..928e6e88bb0 --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/text_annotation.proto @@ -0,0 +1,252 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+import "google/api/annotations.proto";
+import "google/cloud/vision/v1p1beta1/geometry.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "TextAnnotationProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// TextAnnotation contains a structured representation of OCR extracted text.
+// The hierarchy of an OCR extracted text structure is like this:
+//     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+// Each structural component, starting from Page, may further have its own
+// properties. Properties describe detected languages, breaks, etc. Please
+// refer to the
+// [TextAnnotation.TextProperty][google.cloud.vision.v1p1beta1.TextAnnotation.TextProperty]
+// message definition below for more detail.
+message TextAnnotation {
+  // Detected language for a structural component.
+  message DetectedLanguage {
+    // The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+    // information, see
+    // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+    string language_code = 1;
+
+    // Confidence of detected language. Range [0, 1].
+    float confidence = 2;
+  }
+
+  // Detected start or end of a structural component.
+  message DetectedBreak {
+    // Enum to denote the type of break found. New line, space, etc.
+    enum BreakType {
+      // Unknown break label type.
+      UNKNOWN = 0;
+
+      // Regular space.
+      SPACE = 1;
+
+      // Sure space (very wide).
+      SURE_SPACE = 2;
+
+      // Line-wrapping break.
+      EOL_SURE_SPACE = 3;
+
+      // End-line hyphen that is not present in text; does not co-occur with
+      // `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+      HYPHEN = 4;
+
+      // Line break that ends a paragraph.
+      LINE_BREAK = 5;
+    }
+
+    // Detected break type.
+    BreakType type = 1;
+
+    // True if break prepends the element.
+    bool is_prefix = 2;
+  }
+
+  // Additional information detected on the structural component.
+  message TextProperty {
+    // A list of detected languages together with confidence.
+    repeated DetectedLanguage detected_languages = 1;
+
+    // Detected start or end of a text segment.
+    DetectedBreak detected_break = 2;
+  }
+
+  // List of pages detected by OCR.
+  repeated Page pages = 1;
+
+  // UTF-8 text detected on the pages.
+  string text = 2;
+}
+
+// Detected page from OCR.
+message Page {
+  // Additional information detected on the page.
+  TextAnnotation.TextProperty property = 1;
+
+  // Page width in pixels.
+  int32 width = 2;
+
+  // Page height in pixels.
+  int32 height = 3;
+
+  // List of blocks of text, images, etc. on this page.
+  repeated Block blocks = 4;
+
+  // Confidence of the OCR results on the page. Range [0, 1].
+  float confidence = 5;
+}
+
+// Logical element on the page.
+message Block {
+  // Type of a block (text, image, etc.) as identified by OCR.
+  enum BlockType {
+    // Unknown block type.
+    UNKNOWN = 0;
+
+    // Regular text block.
+    TEXT = 1;
+
+    // Table block.
+    TABLE = 2;
+
+    // Image block.
+    PICTURE = 3;
+
+    // Horizontal/vertical line box.
+    RULER = 4;
+
+    // Barcode block.
+    BARCODE = 5;
+  }
+
+  // Additional information detected for the block.
+  TextAnnotation.TextProperty property = 1;
+
+  // The bounding box for the block.
+  // The vertices are in the order of top-left, top-right, bottom-right,
+  // bottom-left. When a rotation of the bounding box is detected the rotation
+  // is represented as around the top-left corner as defined when the text is
+  // read in the 'natural' orientation.
+  // For example:
+  //   * when the text is horizontal it might look like:
+  //      0----1
+  //      |    |
+  //      3----2
+  //   * when it's rotated 180 degrees around the top-left corner it becomes:
+  //      2----3
+  //      |    |
+  //      1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+  BoundingPoly bounding_box = 2;
+
+  // List of paragraphs in this block (if this block is of type text).
+  repeated Paragraph paragraphs = 3;
+
+  // Detected block type (text, image, etc.) for this block.
+  BlockType block_type = 4;
+
+  // Confidence of the OCR results on the block. Range [0, 1].
+  float confidence = 5;
+}
+
+// Structural unit of text representing a number of words in a certain order.
+message Paragraph {
+  // Additional information detected for the paragraph.
+  TextAnnotation.TextProperty property = 1;
+
+  // The bounding box for the paragraph.
+  // The vertices are in the order of top-left, top-right, bottom-right,
+  // bottom-left. When a rotation of the bounding box is detected the rotation
+  // is represented as around the top-left corner as defined when the text is
+  // read in the 'natural' orientation.
+  // For example:
+  //   * when the text is horizontal it might look like:
+  //      0----1
+  //      |    |
+  //      3----2
+  //   * when it's rotated 180 degrees around the top-left corner it becomes:
+  //      2----3
+  //      |    |
+  //      1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+  BoundingPoly bounding_box = 2;
+
+  // List of words in this paragraph.
+  repeated Word words = 3;
+
+  // Confidence of the OCR results for the paragraph. Range [0, 1].
+  float confidence = 4;
+}
+
+// A word representation.
+message Word {
+  // Additional information detected for the word.
+  TextAnnotation.TextProperty property = 1;
+
+  // The bounding box for the word.
+  // The vertices are in the order of top-left, top-right, bottom-right,
+  // bottom-left. When a rotation of the bounding box is detected the rotation
+  // is represented as around the top-left corner as defined when the text is
+  // read in the 'natural' orientation.
+  // For example:
+  //   * when the text is horizontal it might look like:
+  //      0----1
+  //      |    |
+  //      3----2
+  //   * when it's rotated 180 degrees around the top-left corner it becomes:
+  //      2----3
+  //      |    |
+  //      1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+  BoundingPoly bounding_box = 2;
+
+  // List of symbols in the word.
+  // The order of the symbols follows the natural reading order.
+  repeated Symbol symbols = 3;
+
+  // Confidence of the OCR results for the word. Range [0, 1].
+  float confidence = 4;
+}
+
+// A single symbol representation.
+message Symbol {
+  // Additional information detected for the symbol.
+  TextAnnotation.TextProperty property = 1;
+
+  // The bounding box for the symbol.
+  // The vertices are in the order of top-left, top-right, bottom-right,
+  // bottom-left. When a rotation of the bounding box is detected the rotation
+  // is represented as around the top-left corner as defined when the text is
+  // read in the 'natural' orientation.
+  // For example:
+  //   * when the text is horizontal it might look like:
+  //      0----1
+  //      |    |
+  //      3----2
+  //   * when it's rotated 180 degrees around the top-left corner it becomes:
+  //      2----3
+  //      |    |
+  //      1----0
+  //   and the vertex order will still be (0, 1, 2, 3).
+  BoundingPoly bounding_box = 2;
+
+  // The actual UTF-8 representation of the symbol.
+  string text = 3;
+
+  // Confidence of the OCR results for the symbol. Range [0, 1].
+  float confidence = 4;
+}
diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/web_detection.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/web_detection.proto
new file mode 100644
index 00000000000..28249cbdec2
--- /dev/null
+++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1p1beta1/web_detection.proto
@@ -0,0 +1,104 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.vision.v1p1beta1;
+
+import "google/api/annotations.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision";
+option java_multiple_files = true;
+option java_outer_classname = "WebDetectionProto";
+option java_package = "com.google.cloud.vision.v1p1beta1";
+
+// Relevant information for the image from the Internet.
+message WebDetection {
+  // Entity deduced from similar images on the Internet.
+  message WebEntity {
+    // Opaque entity ID.
+    string entity_id = 1;
+
+    // Overall relevancy score for the entity.
+    // Not normalized and not comparable across different image queries.
+    float score = 2;
+
+    // Canonical description of the entity, in English.
+    string description = 3;
+  }
+
+  // Metadata for online images.
+  message WebImage {
+    // The result image URL.
+    string url = 1;
+
+    // (Deprecated) Overall relevancy score for the image.
+    float score = 2;
+  }
+
+  // Metadata for web pages.
+  message WebPage {
+    // The result web page URL.
+    string url = 1;
+
+    // (Deprecated) Overall relevancy score for the web page.
+    float score = 2;
+
+    // Title for the web page; may contain HTML markup.
+    string page_title = 3;
+
+    // Fully matching images on the page.
+    // Can include resized copies of the query image.
+    repeated WebImage full_matching_images = 4;
+
+    // Partial matching images on the page.
+    // Those images are similar enough to share some key-point features. For
+    // example, an original image will likely have partial matching for its
+    // crops.
+    repeated WebImage partial_matching_images = 5;
+  }
+
+  // Label to provide extra metadata for the web detection.
+  message WebLabel {
+    // Label for extra metadata.
+    string label = 1;
+
+    // The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+    // For more information, see
+    // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+    string language_code = 2;
+  }
+
+  // Deduced entities from similar images on the Internet.
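+  // For example, a photo of the Colosseum might yield entities such as
+  // "Colosseum" and "Rome" (illustrative; actual entities vary).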
+  repeated WebEntity web_entities = 1;
+
+  // Fully matching images from the Internet.
+  // Can include resized copies of the query image.
+  repeated WebImage full_matching_images = 2;
+
+  // Partial matching images from the Internet.
+  // Those images are similar enough to share some key-point features. For
+  // example, an original image will likely have partial matching for its crops.
+  repeated WebImage partial_matching_images = 3;
+
+  // Web pages containing the matching images from the Internet.
+  repeated WebPage pages_with_matching_images = 4;
+
+  // The visually similar image results.
+  repeated WebImage visually_similar_images = 6;
+
+  // Best guess text labels for the request image.
+  repeated WebLabel best_guess_labels = 8;
+}
diff --git a/packages/google-cloud-vision/samples/README.md b/packages/google-cloud-vision/samples/README.md
index 8d78307fed0..9ab0b895215 100644
--- a/packages/google-cloud-vision/samples/README.md
+++ b/packages/google-cloud-vision/samples/README.md
@@ -11,6 +11,7 @@ The [Cloud Vision API](https://cloud.google.com/vision/docs) allows developers t
 * [Before you begin](#before-you-begin)
 * [Samples](#samples)
   * [Detection samples](#detection-samples)
+  * [Detection samples for Beta API](#detection-samples-for-beta-api)
 
 ## Before you begin
 
@@ -86,5 +87,39 @@ For more information, see https://cloud.google.com/vision/docs
 [detect_0_docs]: https://cloud.google.com/vision/docs
 [detect_0_code]: detect.js
 
+### Detection samples for Beta API
+
+View the [source code][detect.v1p1beta1.js_1_code].
+
+[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/detect.v1p1beta1.js,samples/README.md)
+
+__Usage:__ `node detect.v1p1beta1.js --help`
+
+```
+detect.v1p1beta1.js
+
+Commands:
+  detect.v1p1beta1.js web-entities-geo  Detects web entities with improved
+                                        results using geographic metadata
+  detect.v1p1beta1.js safe-search       Detects safe search properties
+                                        including additional racy category
+  detect.v1p1beta1.js web               Detects web entities including new best
+                                        guess labels describing content
+  detect.v1p1beta1.js fulltext          Extracts full text from an image file
+                                        including new confidence scores
+
+Options:
+  --version  Show version number                                      [boolean]
+  --help     Show help                                                [boolean]
+
+Examples:
+  node detect.v1p1beta1.js safe-search ./resources/wakeupcat.jpg
+  node detect.v1p1beta1.js web-entities-geo ./resources/city.jpg
+  node detect.v1p1beta1.js web ./resources/wakeupcat.jpg
+  node detect.v1p1beta1.js fulltext ./resources/wakeupcat.jpg
+
+For more information, see https://cloud.google.com/vision/docs
+```
+
+[detect.v1p1beta1.js_1_docs]: https://cloud.google.com/vision/docs
+[detect.v1p1beta1.js_1_code]: detect.v1p1beta1.js
+
 [shell_img]: http://gstatic.com/cloudssh/images/open-btn.png
 [shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/README.md
diff --git a/packages/google-cloud-vision/src/index.js b/packages/google-cloud-vision/src/index.js
index c0f800792f7..ab541eeb92b 100644
--- a/packages/google-cloud-vision/src/index.js
+++ b/packages/google-cloud-vision/src/index.js
@@ -24,6 +24,9 @@
 /**
  * @namespace google.cloud.vision.v1
  */
+/**
+ * @namespace google.cloud.vision.v1p1beta1
+ */
 /**
  * @namespace google.protobuf
  */
@@ -41,6 +44,7 @@ const helpers = require('./helpers');
 
 // Import the clients for each version supported by this package.
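 // A minimal usage sketch for the beta surface (illustrative only; assumes
 // Application Default Credentials and a readable image URI):
 //   const vision = require('@google-cloud/vision');
 //   const client = new vision.v1p1beta1.ImageAnnotatorClient();
 //   client
 //     .annotateImage({
 //       image: {source: {imageUri: 'gs://my-bucket/my-image.png'}},
 //       features: [{type: 'LABEL_DETECTION'}],
 //     })
 //     .then(([result]) => console.log(result.labelAnnotations));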
 const gapic = Object.freeze({
   v1: require('./v1'),
+  v1p1beta1: require('./v1p1beta1'),
 });
 
 // Augment the ImageAnnotatorClient objects with the helpers.
@@ -84,5 +88,12 @@ module.exports = gapic.v1;
  */
 module.exports.v1 = gapic.v1;
 
+/**
+ * @type {object}
+ * @property {constructor} ImageAnnotatorClient
+ *   Reference to {@link v1p1beta1.ImageAnnotatorClient}
+ */
+module.exports.v1p1beta1 = gapic.v1p1beta1;
+
 // Alias `module.exports` as `module.exports.default`, for future-proofing.
 module.exports.default = Object.assign({}, module.exports);
diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_geometry.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_geometry.js
new file mode 100644
index 00000000000..fee7703c773
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_geometry.js
@@ -0,0 +1,72 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the vertex coordinates are in the same scale as the original image.
+ *
+ * @property {number} x
+ *   X coordinate.
+ *
+ * @property {number} y
+ *   Y coordinate.
+ *
+ * @typedef Vertex
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Vertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/geometry.proto}
+ */
+var Vertex = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A bounding polygon for the detected image annotation.
+ *
+ * @property {Object[]} vertices
+ *   The bounding polygon vertices.
+ *
+ *   This object should have the same structure as [Vertex]{@link google.cloud.vision.v1p1beta1.Vertex}
+ *
+ * @typedef BoundingPoly
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.BoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/geometry.proto}
+ */
+var BoundingPoly = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A 3D position in the image, used primarily for Face detection landmarks.
+ * A valid Position must have both x and y coordinates.
+ * The position coordinates are in the same scale as the original image.
+ *
+ * @property {number} x
+ *   X coordinate.
+ *
+ * @property {number} y
+ *   Y coordinate.
+ *
+ * @property {number} z
+ *   Z coordinate (or depth).
+ *
+ * @typedef Position
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Position definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/geometry.proto}
+ */
+var Position = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
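+  //
+  // At runtime a Position arrives as plain data; a face landmark position
+  // might look like `{x: 30.5, y: 40.25, z: 0.0}` (illustrative values).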
+}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_image_annotator.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_image_annotator.js new file mode 100644 index 00000000000..1d631c71152 --- /dev/null +++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_image_annotator.js @@ -0,0 +1,958 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * Users describe the type of Google Cloud Vision API tasks to perform over + * images by using *Feature*s. Each Feature indicates a type of image + * detection task to perform. Features encode the Cloud Vision API + * vertical to operate on and the number of top-scoring results to return. + * + * @property {number} type + * The feature type. + * + * The number should be among the values of [Type]{@link google.cloud.vision.v1p1beta1.Type} + * + * @property {number} maxResults + * Maximum number of results of this type. + * + * @property {string} model + * Model to use for the feature. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @typedef Feature + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.Feature definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var Feature = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Type of image feature. + * + * @enum {number} + * @memberof google.cloud.vision.v1p1beta1 + */ + Type: { + + /** + * Unspecified feature type. + */ + TYPE_UNSPECIFIED: 0, + + /** + * Run face detection. + */ + FACE_DETECTION: 1, + + /** + * Run landmark detection. + */ + LANDMARK_DETECTION: 2, + + /** + * Run logo detection. + */ + LOGO_DETECTION: 3, + + /** + * Run label detection. + */ + LABEL_DETECTION: 4, + + /** + * Run OCR. + */ + TEXT_DETECTION: 5, + + /** + * Run dense text document OCR. Takes precedence when both + * DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + */ + DOCUMENT_TEXT_DETECTION: 11, + + /** + * Run computer vision models to compute image safe-search properties. + */ + SAFE_SEARCH_DETECTION: 6, + + /** + * Compute a set of image properties, such as the image's dominant colors. + */ + IMAGE_PROPERTIES: 7, + + /** + * Run crop hints. + */ + CROP_HINTS: 9, + + /** + * Run web detection. + */ + WEB_DETECTION: 10 + } +}; + +/** + * External image source (Google Cloud Storage image location). + * + * @property {string} gcsImageUri + * NOTE: For new code `image_uri` below is preferred. 
+ * Google Cloud Storage image URI, which must be in the following form: + * `gs://bucket_name/object_name` (for details, see + * [Google Cloud Storage Request + * URIs](https://cloud.google.com/storage/docs/reference-uris)). + * NOTE: Cloud Storage object versioning is not supported. + * + * @property {string} imageUri + * Image URI which supports: + * 1) Google Cloud Storage image URI, which must be in the following form: + * `gs://bucket_name/object_name` (for details, see + * [Google Cloud Storage Request + * URIs](https://cloud.google.com/storage/docs/reference-uris)). + * NOTE: Cloud Storage object versioning is not supported. + * 2) Publicly accessible image HTTP/HTTPS URL. + * This is preferred over the legacy `gcs_image_uri` above. When both + * `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + * precedence. + * + * @typedef ImageSource + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.ImageSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var ImageSource = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Client image to perform Google Cloud Vision API tasks over. + * + * @property {string} content + * Image content, represented as a stream of bytes. + * Note: as with all `bytes` fields, protobuffers use a pure binary + * representation, whereas JSON representations use base64. + * + * @property {Object} source + * Google Cloud Storage image location. If both `content` and `source` + * are provided for an image, `content` takes precedence and is + * used to perform the image annotation request. + * + * This object should have the same structure as [ImageSource]{@link google.cloud.vision.v1p1beta1.ImageSource} + * + * @typedef Image + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.Image definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var Image = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A face annotation object contains the results of face detection. + * + * @property {Object} boundingPoly + * The bounding polygon around the face. The coordinates of the bounding box + * are in the original image's scale, as returned in `ImageParams`. + * The bounding box is computed to "frame" the face in accordance with human + * expectations. It is based on the landmarker results. + * Note that one or more x and/or y coordinates may not be generated in the + * `BoundingPoly` (the polygon will be unbounded) if only a partial face + * appears in the image to be annotated. + * + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly} + * + * @property {Object} fdBoundingPoly + * The `fd_bounding_poly` bounding polygon is tighter than the + * `boundingPoly`, and encloses only the skin part of the face. Typically, it + * is used to eliminate the face from any image analysis that detects the + * "amount of skin" visible in an image. It is not based on the + * landmarker results, only on the initial face detection, hence + * the fd (face detection) prefix. + * + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly} + * + * @property {Object[]} landmarks + * Detected face landmarks. 
+ * + * This object should have the same structure as [Landmark]{@link google.cloud.vision.v1p1beta1.Landmark} + * + * @property {number} rollAngle + * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + * of the face relative to the image vertical about the axis perpendicular to + * the face. Range [-180,180]. + * + * @property {number} panAngle + * Yaw angle, which indicates the leftward/rightward angle that the face is + * pointing relative to the vertical plane perpendicular to the image. Range + * [-180,180]. + * + * @property {number} tiltAngle + * Pitch angle, which indicates the upwards/downwards angle that the face is + * pointing relative to the image's horizontal plane. Range [-180,180]. + * + * @property {number} detectionConfidence + * Detection confidence. Range [0, 1]. + * + * @property {number} landmarkingConfidence + * Face landmarking confidence. Range [0, 1]. + * + * @property {number} joyLikelihood + * Joy likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} sorrowLikelihood + * Sorrow likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} angerLikelihood + * Anger likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} surpriseLikelihood + * Surprise likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} underExposedLikelihood + * Under-exposed likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} blurredLikelihood + * Blurred likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @property {number} headwearLikelihood + * Headwear likelihood. + * + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood} + * + * @typedef FaceAnnotation + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var FaceAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * A face-specific landmark (for example, a face feature). + * + * @property {number} type + * Face landmark type. + * + * The number should be among the values of [Type]{@link google.cloud.vision.v1p1beta1.Type} + * + * @property {Object} position + * Face landmark position. + * + * This object should have the same structure as [Position]{@link google.cloud.vision.v1p1beta1.Position} + * + * @typedef Landmark + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ + Landmark: { + // This is for documentation. Actual contents will be loaded by gRPC. + + /** + * Face landmark (feature) type. + * Left and right are defined from the vantage of the viewer of the image + * without considering mirror projections typical of photos. So, `LEFT_EYE`, + * typically, is the person's right eye. 
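+     * For example, in a head-on portrait the `LEFT_EYE` landmark appears
+     * on the right-hand side of the rendered image (illustrative; placement
+     * varies with head pose).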
+ * + * @enum {number} + * @memberof google.cloud.vision.v1p1beta1 + */ + Type: { + + /** + * Unknown face landmark detected. Should not be filled. + */ + UNKNOWN_LANDMARK: 0, + + /** + * Left eye. + */ + LEFT_EYE: 1, + + /** + * Right eye. + */ + RIGHT_EYE: 2, + + /** + * Left of left eyebrow. + */ + LEFT_OF_LEFT_EYEBROW: 3, + + /** + * Right of left eyebrow. + */ + RIGHT_OF_LEFT_EYEBROW: 4, + + /** + * Left of right eyebrow. + */ + LEFT_OF_RIGHT_EYEBROW: 5, + + /** + * Right of right eyebrow. + */ + RIGHT_OF_RIGHT_EYEBROW: 6, + + /** + * Midpoint between eyes. + */ + MIDPOINT_BETWEEN_EYES: 7, + + /** + * Nose tip. + */ + NOSE_TIP: 8, + + /** + * Upper lip. + */ + UPPER_LIP: 9, + + /** + * Lower lip. + */ + LOWER_LIP: 10, + + /** + * Mouth left. + */ + MOUTH_LEFT: 11, + + /** + * Mouth right. + */ + MOUTH_RIGHT: 12, + + /** + * Mouth center. + */ + MOUTH_CENTER: 13, + + /** + * Nose, bottom right. + */ + NOSE_BOTTOM_RIGHT: 14, + + /** + * Nose, bottom left. + */ + NOSE_BOTTOM_LEFT: 15, + + /** + * Nose, bottom center. + */ + NOSE_BOTTOM_CENTER: 16, + + /** + * Left eye, top boundary. + */ + LEFT_EYE_TOP_BOUNDARY: 17, + + /** + * Left eye, right corner. + */ + LEFT_EYE_RIGHT_CORNER: 18, + + /** + * Left eye, bottom boundary. + */ + LEFT_EYE_BOTTOM_BOUNDARY: 19, + + /** + * Left eye, left corner. + */ + LEFT_EYE_LEFT_CORNER: 20, + + /** + * Right eye, top boundary. + */ + RIGHT_EYE_TOP_BOUNDARY: 21, + + /** + * Right eye, right corner. + */ + RIGHT_EYE_RIGHT_CORNER: 22, + + /** + * Right eye, bottom boundary. + */ + RIGHT_EYE_BOTTOM_BOUNDARY: 23, + + /** + * Right eye, left corner. + */ + RIGHT_EYE_LEFT_CORNER: 24, + + /** + * Left eyebrow, upper midpoint. + */ + LEFT_EYEBROW_UPPER_MIDPOINT: 25, + + /** + * Right eyebrow, upper midpoint. + */ + RIGHT_EYEBROW_UPPER_MIDPOINT: 26, + + /** + * Left ear tragion. + */ + LEFT_EAR_TRAGION: 27, + + /** + * Right ear tragion. + */ + RIGHT_EAR_TRAGION: 28, + + /** + * Left eye pupil. + */ + LEFT_EYE_PUPIL: 29, + + /** + * Right eye pupil. + */ + RIGHT_EYE_PUPIL: 30, + + /** + * Forehead glabella. + */ + FOREHEAD_GLABELLA: 31, + + /** + * Chin gnathion. + */ + CHIN_GNATHION: 32, + + /** + * Chin left gonion. + */ + CHIN_LEFT_GONION: 33, + + /** + * Chin right gonion. + */ + CHIN_RIGHT_GONION: 34 + } + } +}; + +/** + * Detected entity location information. + * + * @property {Object} latLng + * lat/long location coordinates. + * + * This object should have the same structure as [LatLng]{@link google.type.LatLng} + * + * @typedef LocationInfo + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.LocationInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var LocationInfo = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A `Property` consists of a user-supplied name/value pair. + * + * @property {string} name + * Name of the property. + * + * @property {string} value + * Value of the property. + * + * @property {number} uint64Value + * Value of numeric properties. + * + * @typedef Property + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.Property definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var Property = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of detected entity features. 
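+ * For example, landmark detection on a photo of the Eiffel Tower would
+ * typically return an annotation whose `description` is "Eiffel Tower",
+ * together with a `score` and `locations` (illustrative; actual results
+ * vary).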
+ *
+ * @property {string} mid
+ *   Opaque entity ID. Some IDs may be available in
+ *   [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/).
+ *
+ * @property {string} locale
+ *   The language code for the locale in which the entity textual
+ *   `description` is expressed.
+ *
+ * @property {string} description
+ *   Entity textual description, expressed in its `locale` language.
+ *
+ * @property {number} score
+ *   Overall score of the result. Range [0, 1].
+ *
+ * @property {number} confidence
+ *   The accuracy of the entity detection in an image.
+ *   For example, for an image in which the "Eiffel Tower" entity is detected,
+ *   this field represents the confidence that there is a tower in the query
+ *   image. Range [0, 1].
+ *
+ * @property {number} topicality
+ *   The relevancy of the ICA (Image Content Annotation) label to the
+ *   image. For example, the relevancy of "tower" is likely higher to an image
+ *   containing the detected "Eiffel Tower" than to an image containing a
+ *   detected distant towering building, even though the confidence that
+ *   there is a tower in each image may be the same. Range [0, 1].
+ *
+ * @property {Object} boundingPoly
+ *   Image region to which this entity belongs. Not produced
+ *   for `LABEL_DETECTION` features.
+ *
+ *   This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly}
+ *
+ * @property {Object[]} locations
+ *   The location information for the detected entity. Multiple
+ *   `LocationInfo` elements can be present because one location may
+ *   indicate the location of the scene in the image, and another location
+ *   may indicate the location of the place where the image was taken.
+ *   Location information is usually present for landmarks.
+ *
+ *   This object should have the same structure as [LocationInfo]{@link google.cloud.vision.v1p1beta1.LocationInfo}
+ *
+ * @property {Object[]} properties
+ *   Some entities may have optional user-supplied `Property` (name/value)
+ *   fields, such as a score or string that qualifies the entity.
+ *
+ *   This object should have the same structure as [Property]{@link google.cloud.vision.v1p1beta1.Property}
+ *
+ * @typedef EntityAnnotation
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.EntityAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var EntityAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of features pertaining to the image, computed by computer vision
+ * methods over safe-search verticals (for example, adult, spoof, medical,
+ * violence).
+ *
+ * @property {number} adult
+ *   Represents the adult content likelihood for the image. Adult content may
+ *   contain elements such as nudity, pornographic images or cartoons, or
+ *   sexual activities.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood}
+ *
+ * @property {number} spoof
+ *   Spoof likelihood. The likelihood that a modification
+ *   was made to the image's canonical version to make it appear
+ *   funny or offensive.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood}
+ *
+ * @property {number} medical
+ *   Likelihood that this is a medical image.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood}
+ *
+ * @property {number} violence
+ *   Likelihood that this image contains violent content.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood}
+ *
+ * @property {number} racy
+ *   Likelihood that the request image contains racy content. Racy content may
+ *   include (but is not limited to) skimpy or sheer clothing, strategically
+ *   covered nudity, lewd or provocative poses, or close-ups of sensitive
+ *   body areas.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.vision.v1p1beta1.Likelihood}
+ *
+ * @typedef SafeSearchAnnotation
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.SafeSearchAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var SafeSearchAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Rectangle determined by min and max `LatLng` pairs.
+ *
+ * @property {Object} minLatLng
+ *   Min lat/long pair.
+ *
+ *   This object should have the same structure as [LatLng]{@link google.type.LatLng}
+ *
+ * @property {Object} maxLatLng
+ *   Max lat/long pair.
+ *
+ *   This object should have the same structure as [LatLng]{@link google.type.LatLng}
+ *
+ * @typedef LatLongRect
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.LatLongRect definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var LatLongRect = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Color information consists of RGB channels, score, and the fraction of
+ * the image that the color occupies.
+ *
+ * @property {Object} color
+ *   RGB components of the color.
+ *
+ *   This object should have the same structure as [Color]{@link google.type.Color}
+ *
+ * @property {number} score
+ *   Image-specific score for this color. Value in range [0, 1].
+ *
+ * @property {number} pixelFraction
+ *   The fraction of pixels the color occupies in the image.
+ *   Value in range [0, 1].
+ *
+ * @typedef ColorInfo
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.ColorInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var ColorInfo = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Set of dominant colors and their corresponding scores.
+ *
+ * @property {Object[]} colors
+ *   RGB color values with their score and pixel fraction.
+ *
+ *   This object should have the same structure as [ColorInfo]{@link google.cloud.vision.v1p1beta1.ColorInfo}
+ *
+ * @typedef DominantColorsAnnotation
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.DominantColorsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var DominantColorsAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Stores image properties, such as dominant colors.
+ *
+ * @property {Object} dominantColors
+ *   If present, dominant colors were extracted successfully.
+ * + * This object should have the same structure as [DominantColorsAnnotation]{@link google.cloud.vision.v1p1beta1.DominantColorsAnnotation} + * + * @typedef ImageProperties + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.ImageProperties definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var ImageProperties = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Single crop hint that is used to generate a new crop when serving an image. + * + * @property {Object} boundingPoly + * The bounding polygon for the crop region. The coordinates of the bounding + * box are in the original image's scale, as returned in `ImageParams`. + * + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly} + * + * @property {number} confidence + * Confidence of this being a salient region. Range [0, 1]. + * + * @property {number} importanceFraction + * Fraction of importance of this salient region with respect to the original + * image. + * + * @typedef CropHint + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.CropHint definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var CropHint = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Set of crop hints that are used to generate new crops when serving images. + * + * @property {Object[]} cropHints + * Crop hint results. + * + * This object should have the same structure as [CropHint]{@link google.cloud.vision.v1p1beta1.CropHint} + * + * @typedef CropHintsAnnotation + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.CropHintsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var CropHintsAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Parameters for crop hints annotation request. + * + * @property {number[]} aspectRatios + * Aspect ratios in floats, representing the ratio of the width to the height + * of the image. For example, if the desired aspect ratio is 4/3, the + * corresponding float value should be 1.33333. If not specified, the + * best possible crop is returned. The number of provided aspect ratios is + * limited to a maximum of 16; any aspect ratios provided after the 16th are + * ignored. + * + * @typedef CropHintsParams + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.CropHintsParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var CropHintsParams = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Parameters for web detection request. + * + * @property {boolean} includeGeoResults + * Whether to include results derived from the geo information in the image. + * + * @typedef WebDetectionParams + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.WebDetectionParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var WebDetectionParams = { + // This is for documentation. 
Actual contents will be loaded by gRPC. +}; + +/** + * Image context and/or feature-specific parameters. + * + * @property {Object} latLongRect + * lat/long rectangle that specifies the location of the image. + * + * This object should have the same structure as [LatLongRect]{@link google.cloud.vision.v1p1beta1.LatLongRect} + * + * @property {string[]} languageHints + * List of languages to use for TEXT_DETECTION. In most cases, an empty value + * yields the best results since it enables automatic language detection. For + * languages based on the Latin alphabet, setting `language_hints` is not + * needed. In rare cases, when the language of the text in the image is known, + * setting a hint will help get better results (although it will be a + * significant hindrance if the hint is wrong). Text detection returns an + * error if one or more of the specified languages is not one of the + * [supported languages](https://cloud.google.com/vision/docs/languages). + * + * @property {Object} cropHintsParams + * Parameters for crop hints annotation request. + * + * This object should have the same structure as [CropHintsParams]{@link google.cloud.vision.v1p1beta1.CropHintsParams} + * + * @property {Object} webDetectionParams + * Parameters for web detection. + * + * This object should have the same structure as [WebDetectionParams]{@link google.cloud.vision.v1p1beta1.WebDetectionParams} + * + * @typedef ImageContext + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.ImageContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var ImageContext = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Request for performing Google Cloud Vision API tasks over a user-provided + * image, with user-requested features. + * + * @property {Object} image + * The image to be processed. + * + * This object should have the same structure as [Image]{@link google.cloud.vision.v1p1beta1.Image} + * + * @property {Object[]} features + * Requested features. + * + * This object should have the same structure as [Feature]{@link google.cloud.vision.v1p1beta1.Feature} + * + * @property {Object} imageContext + * Additional context that may accompany the image. + * + * This object should have the same structure as [ImageContext]{@link google.cloud.vision.v1p1beta1.ImageContext} + * + * @typedef AnnotateImageRequest + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.AnnotateImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var AnnotateImageRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Response to an image annotation request. + * + * @property {Object[]} faceAnnotations + * If present, face detection has completed successfully. + * + * This object should have the same structure as [FaceAnnotation]{@link google.cloud.vision.v1p1beta1.FaceAnnotation} + * + * @property {Object[]} landmarkAnnotations + * If present, landmark detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p1beta1.EntityAnnotation} + * + * @property {Object[]} logoAnnotations + * If present, logo detection has completed successfully. 
+ * + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p1beta1.EntityAnnotation} + * + * @property {Object[]} labelAnnotations + * If present, label detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p1beta1.EntityAnnotation} + * + * @property {Object[]} textAnnotations + * If present, text (OCR) detection has completed successfully. + * + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1p1beta1.EntityAnnotation} + * + * @property {Object} fullTextAnnotation + * If present, text (OCR) detection or document (OCR) text detection has + * completed successfully. + * This annotation provides the structural hierarchy for the OCR detected + * text. + * + * This object should have the same structure as [TextAnnotation]{@link google.cloud.vision.v1p1beta1.TextAnnotation} + * + * @property {Object} safeSearchAnnotation + * If present, safe-search annotation has completed successfully. + * + * This object should have the same structure as [SafeSearchAnnotation]{@link google.cloud.vision.v1p1beta1.SafeSearchAnnotation} + * + * @property {Object} imagePropertiesAnnotation + * If present, image properties were extracted successfully. + * + * This object should have the same structure as [ImageProperties]{@link google.cloud.vision.v1p1beta1.ImageProperties} + * + * @property {Object} cropHintsAnnotation + * If present, crop hints have completed successfully. + * + * This object should have the same structure as [CropHintsAnnotation]{@link google.cloud.vision.v1p1beta1.CropHintsAnnotation} + * + * @property {Object} webDetection + * If present, web detection has completed successfully. + * + * This object should have the same structure as [WebDetection]{@link google.cloud.vision.v1p1beta1.WebDetection} + * + * @property {Object} error + * If set, represents the error message for the operation. + * Note that filled-in image annotations are guaranteed to be + * correct, even when `error` is set. + * + * This object should have the same structure as [Status]{@link google.rpc.Status} + * + * @typedef AnnotateImageResponse + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.AnnotateImageResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var AnnotateImageResponse = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Multiple image annotation requests are batched into a single service call. + * + * @property {Object[]} requests + * Individual image annotation requests for this batch. + * + * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p1beta1.AnnotateImageRequest} + * + * @typedef BatchAnnotateImagesRequest + * @memberof google.cloud.vision.v1p1beta1 + * @see [google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto} + */ +var BatchAnnotateImagesRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Response to a batch image annotation request. + * + * @property {Object[]} responses + * Individual responses to image annotation requests within the batch. 
+ *
+ *   This object should have the same structure as [AnnotateImageResponse]{@link google.cloud.vision.v1p1beta1.AnnotateImageResponse}
+ *
+ * @typedef BatchAnnotateImagesResponse
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/image_annotator.proto}
+ */
+var BatchAnnotateImagesResponse = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A bucketized representation of likelihood, which is intended to give clients
+ * highly stable results across model upgrades.
+ *
+ * @enum {number}
+ * @memberof google.cloud.vision.v1p1beta1
+ */
+var Likelihood = {
+
+  /**
+   * Unknown likelihood.
+   */
+  UNKNOWN: 0,
+
+  /**
+   * It is very unlikely that the image belongs to the specified vertical.
+   */
+  VERY_UNLIKELY: 1,
+
+  /**
+   * It is unlikely that the image belongs to the specified vertical.
+   */
+  UNLIKELY: 2,
+
+  /**
+   * It is possible that the image belongs to the specified vertical.
+   */
+  POSSIBLE: 3,
+
+  /**
+   * It is likely that the image belongs to the specified vertical.
+   */
+  LIKELY: 4,
+
+  /**
+   * It is very likely that the image belongs to the specified vertical.
+   */
+  VERY_LIKELY: 5
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_text_annotation.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_text_annotation.js
new file mode 100644
index 00000000000..23c3b7469ba
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_text_annotation.js
@@ -0,0 +1,386 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * TextAnnotation contains a structured representation of OCR extracted text.
+ * The hierarchy of an OCR extracted text structure is like this:
+ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
+ * Each structural component, starting from Page, may further have its own
+ * properties. Properties describe detected languages, breaks etc. Please
+ * refer to the
+ * TextAnnotation.TextProperty
+ * message definition below for more detail.
+ *
+ * @property {Object[]} pages
+ *   List of pages detected by OCR.
+ *
+ *   This object should have the same structure as [Page]{@link google.cloud.vision.v1p1beta1.Page}
+ *
+ * @property {string} text
+ *   UTF-8 text detected on the pages.
+ *
+ * @typedef TextAnnotation
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var TextAnnotation = {
+  // This is for documentation.
Actual contents will be loaded by gRPC.
+
+  /**
+   * Detected language for a structural component.
+   *
+   * @property {string} languageCode
+   *   The BCP-47 language code, such as "en-US" or "sr-Latn". For more
+   *   information, see
+   *   http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+   *
+   * @property {number} confidence
+   *   Confidence of detected language. Range [0, 1].
+   *
+   * @typedef DetectedLanguage
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.TextAnnotation.DetectedLanguage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+   */
+  DetectedLanguage: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  },
+
+  /**
+   * Detected start or end of a structural component.
+   *
+   * @property {number} type
+   *   Detected break type.
+   *
+   *   The number should be among the values of [BreakType]{@link google.cloud.vision.v1p1beta1.BreakType}
+   *
+   * @property {boolean} isPrefix
+   *   True if break prepends the element.
+   *
+   * @typedef DetectedBreak
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.TextAnnotation.DetectedBreak definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+   */
+  DetectedBreak: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+
+    /**
+     * Enum to denote the type of break found. New line, space, etc.
+     *
+     * @enum {number}
+     * @memberof google.cloud.vision.v1p1beta1
+     */
+    BreakType: {
+
+      /**
+       * Unknown break label type.
+       */
+      UNKNOWN: 0,
+
+      /**
+       * Regular space.
+       */
+      SPACE: 1,
+
+      /**
+       * Sure space (very wide).
+       */
+      SURE_SPACE: 2,
+
+      /**
+       * Line-wrapping break.
+       */
+      EOL_SURE_SPACE: 3,
+
+      /**
+       * End-line hyphen that is not present in text; does not co-occur with
+       * `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
+       */
+      HYPHEN: 4,
+
+      /**
+       * Line break that ends a paragraph.
+       */
+      LINE_BREAK: 5
+    }
+  },
+
+  /**
+   * Additional information detected on the structural component.
+   *
+   * @property {Object[]} detectedLanguages
+   *   A list of detected languages together with confidence.
+   *
+   *   This object should have the same structure as [DetectedLanguage]{@link google.cloud.vision.v1p1beta1.DetectedLanguage}
+   *
+   * @property {Object} detectedBreak
+   *   Detected start or end of a text segment.
+   *
+   *   This object should have the same structure as [DetectedBreak]{@link google.cloud.vision.v1p1beta1.DetectedBreak}
+   *
+   * @typedef TextProperty
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.TextAnnotation.TextProperty definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+   */
+  TextProperty: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  }
+};
+
+/**
+ * Detected page from OCR.
+ *
+ * @property {Object} property
+ *   Additional information detected on the page.
+ *
+ *   This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p1beta1.TextProperty}
+ *
+ * @property {number} width
+ *   Page width in pixels.
+ *
+ * @property {number} height
+ *   Page height in pixels.
+ *
+ * @property {Object[]} blocks
+ *   List of blocks of text, images, etc. on this page.
+ *
+ *   This object should have the same structure as [Block]{@link google.cloud.vision.v1p1beta1.Block}
+ *
+ * @property {number} confidence
+ *   Confidence of the OCR results on the page. Range [0, 1].
+ *
+ * @typedef Page
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Page definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var Page = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Logical element on the page.
+ *
+ * @property {Object} property
+ *   Additional information detected for the block.
+ *
+ *   This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p1beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ *   The bounding box for the block.
+ *   The vertices are in the order of top-left, top-right, bottom-right,
+ *   bottom-left. When a rotation of the bounding box is detected, the rotation
+ *   is represented as a rotation around the top-left corner as defined when
+ *   the text is read in the 'natural' orientation.
+ *   For example:
+ *   * when the text is horizontal it might look like:
+ *
+ *         0----1
+ *         |    |
+ *         3----2
+ *
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *
+ *         2----3
+ *         |    |
+ *         1----0
+ *
+ *   and the vertex order will still be (0, 1, 2, 3).
+ *
+ *   This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly}
+ *
+ * @property {Object[]} paragraphs
+ *   List of paragraphs in this block (if this block is of type text).
+ *
+ *   This object should have the same structure as [Paragraph]{@link google.cloud.vision.v1p1beta1.Paragraph}
+ *
+ * @property {number} blockType
+ *   Detected block type (text, image, etc.) for this block.
+ *
+ *   The number should be among the values of [BlockType]{@link google.cloud.vision.v1p1beta1.BlockType}
+ *
+ * @property {number} confidence
+ *   Confidence of the OCR results on the block. Range [0, 1].
+ *
+ * @typedef Block
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Block definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var Block = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+
+  /**
+   * Type of a block (text, image, etc.) as identified by OCR.
+   *
+   * @enum {number}
+   * @memberof google.cloud.vision.v1p1beta1
+   */
+  BlockType: {
+
+    /**
+     * Unknown block type.
+     */
+    UNKNOWN: 0,
+
+    /**
+     * Regular text block.
+     */
+    TEXT: 1,
+
+    /**
+     * Table block.
+     */
+    TABLE: 2,
+
+    /**
+     * Image block.
+     */
+    PICTURE: 3,
+
+    /**
+     * Horizontal/vertical line box.
+     */
+    RULER: 4,
+
+    /**
+     * Barcode block.
+     */
+    BARCODE: 5
+  }
+};
+
+/**
+ * Structural unit of text representing a number of words in a certain order.
+ *
+ * @property {Object} property
+ *   Additional information detected for the paragraph.
+ *
+ *   This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p1beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ *   The bounding box for the paragraph.
+ *   The vertices are in the order of top-left, top-right, bottom-right,
+ *   bottom-left. When a rotation of the bounding box is detected, the rotation
+ *   is represented as a rotation around the top-left corner as defined when
+ *   the text is read in the 'natural' orientation.
+ *   For example:
+ *   * when the text is horizontal it might look like:
+ *
+ *         0----1
+ *         |    |
+ *         3----2
+ *
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *
+ *         2----3
+ *         |    |
+ *         1----0
+ *
+ *   and the vertex order will still be (0, 1, 2, 3).
+ *
+ *   This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly}
+ *
+ * @property {Object[]} words
+ *   List of words in this paragraph.
+ *
+ *   This object should have the same structure as [Word]{@link google.cloud.vision.v1p1beta1.Word}
+ *
+ * @property {number} confidence
+ *   Confidence of the OCR results for the paragraph. Range [0, 1].
+ *
+ * @typedef Paragraph
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Paragraph definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var Paragraph = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A word representation.
+ *
+ * @property {Object} property
+ *   Additional information detected for the word.
+ *
+ *   This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p1beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ *   The bounding box for the word.
+ *   The vertices are in the order of top-left, top-right, bottom-right,
+ *   bottom-left. When a rotation of the bounding box is detected, the rotation
+ *   is represented as a rotation around the top-left corner as defined when
+ *   the text is read in the 'natural' orientation.
+ *   For example:
+ *   * when the text is horizontal it might look like:
+ *
+ *         0----1
+ *         |    |
+ *         3----2
+ *
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *
+ *         2----3
+ *         |    |
+ *         1----0
+ *
+ *   and the vertex order will still be (0, 1, 2, 3).
+ *
+ *   This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly}
+ *
+ * @property {Object[]} symbols
+ *   List of symbols in the word.
+ *   The order of the symbols follows the natural reading order.
+ *
+ *   This object should have the same structure as [Symbol]{@link google.cloud.vision.v1p1beta1.Symbol}
+ *
+ * @property {number} confidence
+ *   Confidence of the OCR results for the word. Range [0, 1].
+ *
+ * @typedef Word
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Word definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var Word = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A single symbol representation.
+ *
+ * @property {Object} property
+ *   Additional information detected for the symbol.
+ *
+ *   This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1p1beta1.TextProperty}
+ *
+ * @property {Object} boundingBox
+ *   The bounding box for the symbol.
+ *   The vertices are in the order of top-left, top-right, bottom-right,
+ *   bottom-left. When a rotation of the bounding box is detected, the rotation
+ *   is represented as a rotation around the top-left corner as defined when
+ *   the text is read in the 'natural' orientation.
+ *   For example:
+ *   * when the text is horizontal it might look like:
+ *
+ *         0----1
+ *         |    |
+ *         3----2
+ *
+ *   * when it's rotated 180 degrees around the top-left corner it becomes:
+ *
+ *         2----3
+ *         |    |
+ *         1----0
+ *
+ *   and the vertex order will still be (0, 1, 2, 3).
+ *
+ *   This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1p1beta1.BoundingPoly}
+ *
+ * @property {string} text
+ *   The actual UTF-8 representation of the symbol.
+ *
+ * @property {number} confidence
+ *   Confidence of the OCR results for the symbol. Range [0, 1].
+ *
+ * @typedef Symbol
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.Symbol definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/text_annotation.proto}
+ */
+var Symbol = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_web_detection.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_web_detection.js
new file mode 100644
index 00000000000..a32989ae19c
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/cloud/vision/v1p1beta1/doc_web_detection.js
@@ -0,0 +1,151 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * Relevant information for the image from the Internet.
+ *
+ * @property {Object[]} webEntities
+ *   Deduced entities from similar images on the Internet.
+ *
+ *   This object should have the same structure as [WebEntity]{@link google.cloud.vision.v1p1beta1.WebEntity}
+ *
+ * @property {Object[]} fullMatchingImages
+ *   Fully matching images from the Internet.
+ *   Can include resized copies of the query image.
+ *
+ *   This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p1beta1.WebImage}
+ *
+ * @property {Object[]} partialMatchingImages
+ *   Partial matching images from the Internet.
+ *   Those images are similar enough to share some key-point features. For
+ *   example, an original image will likely have partial matching for its crops.
+ *
+ *   This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p1beta1.WebImage}
+ *
+ * @property {Object[]} pagesWithMatchingImages
+ *   Web pages containing the matching images from the Internet.
+ *
+ *   This object should have the same structure as [WebPage]{@link google.cloud.vision.v1p1beta1.WebPage}
+ *
+ * @property {Object[]} visuallySimilarImages
+ *   The visually similar image results.
+ *
+ *   This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p1beta1.WebImage}
+ *
+ * @property {Object[]} bestGuessLabels
+ *   Best guess text labels for the request image.
+ *
+ *   This object should have the same structure as [WebLabel]{@link google.cloud.vision.v1p1beta1.WebLabel}
+ *
+ * @typedef WebDetection
+ * @memberof google.cloud.vision.v1p1beta1
+ * @see [google.cloud.vision.v1p1beta1.WebDetection definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/web_detection.proto}
+ */
+var WebDetection = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+
+  /**
+   * Entity deduced from similar images on the Internet.
+   *
+   * @property {string} entityId
+   *   Opaque entity ID.
+   *
+   * @property {number} score
+   *   Overall relevancy score for the entity.
+   *   Not normalized and not comparable across different image queries.
+   *
+   * @property {string} description
+   *   Canonical description of the entity, in English.
+   *
+   * @typedef WebEntity
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.WebDetection.WebEntity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/web_detection.proto}
+   */
+  WebEntity: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  },
+
+  /**
+   * Metadata for online images.
+   *
+   * @property {string} url
+   *   The result image URL.
+   *
+   * @property {number} score
+   *   (Deprecated) Overall relevancy score for the image.
+   *
+   * @typedef WebImage
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.WebDetection.WebImage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/web_detection.proto}
+   */
+  WebImage: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  },
+
+  /**
+   * Metadata for web pages.
+   *
+   * @property {string} url
+   *   The result web page URL.
+   *
+   * @property {number} score
+   *   (Deprecated) Overall relevancy score for the web page.
+   *
+   * @property {string} pageTitle
+   *   Title for the web page; may contain HTML markup.
+   *
+   * @property {Object[]} fullMatchingImages
+   *   Fully matching images on the page.
+   *   Can include resized copies of the query image.
+   *
+   *   This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p1beta1.WebImage}
+   *
+   * @property {Object[]} partialMatchingImages
+   *   Partial matching images on the page.
+   *   Those images are similar enough to share some key-point features. For
+   *   example, an original image will likely have partial matching for its
+   *   crops.
+   *
+   *   This object should have the same structure as [WebImage]{@link google.cloud.vision.v1p1beta1.WebImage}
+   *
+   * @typedef WebPage
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.WebDetection.WebPage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/web_detection.proto}
+   */
+  WebPage: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  },
+
+  /**
+   * Label to provide extra metadata for the web detection.
+   *
+   * @property {string} label
+   *   Label for extra metadata.
+   *
+   * @property {string} languageCode
+   *   The BCP-47 language code for `label`, such as "en-US" or "sr-Latn".
+   *   For more information, see
+   *   http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
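+   *
+   * As a hypothetical sketch (not part of the generated reference), the
+   * best-guess labels on a `WebDetection` could be read like this:
+   *
+   *     // `webDetection` is assumed to come from an AnnotateImageResponse.
+   *     (webDetection.bestGuessLabels || []).forEach(l => {
+   *       console.log(l.label, l.languageCode);
+   *     });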
+   *
+   * @typedef WebLabel
+   * @memberof google.cloud.vision.v1p1beta1
+   * @see [google.cloud.vision.v1p1beta1.WebDetection.WebLabel definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1p1beta1/web_detection.proto}
+   */
+  WebLabel: {
+    // This is for documentation. Actual contents will be loaded by gRPC.
+  }
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_any.js
new file mode 100644
index 00000000000..21feb744243
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_any.js
@@ -0,0 +1,131 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * `Any` contains an arbitrary serialized protocol buffer message along with a
+ * URL that describes the type of the serialized message.
+ *
+ * The protobuf library provides support to pack/unpack Any values in the form
+ * of utility functions or additional generated methods of the Any type.
+ *
+ * Example 1: Pack and unpack a message in C++.
+ *
+ *     Foo foo = ...;
+ *     Any any;
+ *     any.PackFrom(foo);
+ *     ...
+ *     if (any.UnpackTo(&foo)) {
+ *       ...
+ *     }
+ *
+ * Example 2: Pack and unpack a message in Java.
+ *
+ *     Foo foo = ...;
+ *     Any any = Any.pack(foo);
+ *     ...
+ *     if (any.is(Foo.class)) {
+ *       foo = any.unpack(Foo.class);
+ *     }
+ *
+ * Example 3: Pack and unpack a message in Python.
+ *
+ *     foo = Foo(...)
+ *     any = Any()
+ *     any.Pack(foo)
+ *     ...
+ *     if any.Is(Foo.DESCRIPTOR):
+ *       any.Unpack(foo)
+ *       ...
+ *
+ * Example 4: Pack and unpack a message in Go.
+ *
+ *     foo := &pb.Foo{...}
+ *     any, err := ptypes.MarshalAny(foo)
+ *     ...
+ *     foo := &pb.Foo{}
+ *     if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ *       ...
+ *     }
+ *
+ * The pack methods provided by the protobuf library will by default use
+ * 'type.googleapis.com/full.type.name' as the type URL and the unpack
+ * methods only use the fully qualified type name after the last '/'
+ * in the type URL, for example "foo.bar.com/x/y.z" will yield type
+ * name "y.z".
+ *
+ *
+ * # JSON
+ *
+ * The JSON representation of an `Any` value uses the regular
+ * representation of the deserialized, embedded message, with an
+ * additional field `@type` which contains the type URL. Example:
+ *
+ *     package google.profile;
+ *     message Person {
+ *       string first_name = 1;
+ *       string last_name = 2;
+ *     }
+ *
+ *     {
+ *       "@type": "type.googleapis.com/google.profile.Person",
+ *       "firstName": <string>,
+ *       "lastName": <string>
+ *     }
+ *
+ * If the embedded message type is well-known and has a custom JSON
+ * representation, that representation will be embedded adding a field
+ * `value` which holds the custom JSON in addition to the `@type`
+ * field.
Example (for message google.protobuf.Duration): + * + * { + * "@type": "type.googleapis.com/google.protobuf.Duration", + * "value": "1.212s" + * } + * + * @property {string} typeUrl + * A URL/resource name whose content describes the type of the + * serialized protocol buffer message. + * + * For URLs which use the scheme `http`, `https`, or no scheme, the + * following restrictions and interpretations apply: + * + * * If no scheme is provided, `https` is assumed. + * * The last segment of the URL's path must represent the fully + * qualified name of the type (as in `path/google.protobuf.Duration`). + * The name should be in a canonical form (e.g., leading "." is + * not accepted). + * * An HTTP GET on the URL must yield a google.protobuf.Type + * value in binary format, or produce an error. + * * Applications are allowed to cache lookup results based on the + * URL, or have them precompiled into a binary to avoid any + * lookup. Therefore, binary compatibility needs to be preserved + * on changes to types. (Use versioned type names to manage + * breaking changes.) + * + * Schemes other than `http`, `https` (or the empty scheme) might be + * used with implementation specific semantics. + * + * @property {string} value + * Must be a valid serialized protocol buffer of the above specified type. + * + * @typedef Any + * @memberof google.protobuf + * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto} + */ +var Any = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_wrappers.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_wrappers.js new file mode 100644 index 00000000000..0ccf17f236b --- /dev/null +++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/protobuf/doc_wrappers.js @@ -0,0 +1,160 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * Wrapper message for `double`. + * + * The JSON representation for `DoubleValue` is JSON number. + * + * @property {number} value + * The double value. + * + * @typedef DoubleValue + * @memberof google.protobuf + * @see [google.protobuf.DoubleValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var DoubleValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `float`. + * + * The JSON representation for `FloatValue` is JSON number. + * + * @property {number} value + * The float value. 
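+ *
+ * As an illustrative sketch (an assumption about the plain-object shape used
+ * by this client, not generated reference text), a wrapper surfaces in
+ * JavaScript as an object with a single `value` field, which keeps an unset
+ * field distinguishable from 0:
+ *
+ *     const alpha = {value: 0.5}; // a populated FloatValue
+ *     console.log(alpha ? alpha.value : 1.0); // fall back when unset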
+ * + * @typedef FloatValue + * @memberof google.protobuf + * @see [google.protobuf.FloatValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var FloatValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `int64`. + * + * The JSON representation for `Int64Value` is JSON string. + * + * @property {number} value + * The int64 value. + * + * @typedef Int64Value + * @memberof google.protobuf + * @see [google.protobuf.Int64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var Int64Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `uint64`. + * + * The JSON representation for `UInt64Value` is JSON string. + * + * @property {number} value + * The uint64 value. + * + * @typedef UInt64Value + * @memberof google.protobuf + * @see [google.protobuf.UInt64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var UInt64Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `int32`. + * + * The JSON representation for `Int32Value` is JSON number. + * + * @property {number} value + * The int32 value. + * + * @typedef Int32Value + * @memberof google.protobuf + * @see [google.protobuf.Int32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var Int32Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `uint32`. + * + * The JSON representation for `UInt32Value` is JSON number. + * + * @property {number} value + * The uint32 value. + * + * @typedef UInt32Value + * @memberof google.protobuf + * @see [google.protobuf.UInt32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var UInt32Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `bool`. + * + * The JSON representation for `BoolValue` is JSON `true` and `false`. + * + * @property {boolean} value + * The bool value. + * + * @typedef BoolValue + * @memberof google.protobuf + * @see [google.protobuf.BoolValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var BoolValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `string`. + * + * The JSON representation for `StringValue` is JSON string. + * + * @property {string} value + * The string value. + * + * @typedef StringValue + * @memberof google.protobuf + * @see [google.protobuf.StringValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var StringValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Wrapper message for `bytes`. + * + * The JSON representation for `BytesValue` is JSON string. + * + * @property {string} value + * The bytes value. 
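+ *
+ * In proto3 JSON (and in this client's plain-object representation), `bytes`
+ * payloads travel as base64-encoded strings; a minimal Node.js sketch
+ * (illustrative only):
+ *
+ *     const bytesValue = {value: Buffer.from('hello').toString('base64')};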
+ * + * @typedef BytesValue + * @memberof google.protobuf + * @see [google.protobuf.BytesValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} + */ +var BytesValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/rpc/doc_status.js new file mode 100644 index 00000000000..be5e96ce26d --- /dev/null +++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/rpc/doc_status.js @@ -0,0 +1,92 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * The `Status` type defines a logical error model that is suitable for different + * programming environments, including REST APIs and RPC APIs. It is used by + * [gRPC](https://github.com/grpc). The error model is designed to be: + * + * - Simple to use and understand for most users + * - Flexible enough to meet unexpected needs + * + * # Overview + * + * The `Status` message contains three pieces of data: error code, error message, + * and error details. The error code should be an enum value of + * google.rpc.Code, but it may accept additional error codes if needed. The + * error message should be a developer-facing English message that helps + * developers *understand* and *resolve* the error. If a localized user-facing + * error message is needed, put the localized message in the error details or + * localize it in the client. The optional error details may contain arbitrary + * information about the error. There is a predefined set of error detail types + * in the package `google.rpc` that can be used for common error conditions. + * + * # Language mapping + * + * The `Status` message is the logical representation of the error model, but it + * is not necessarily the actual wire format. When the `Status` message is + * exposed in different client libraries and different wire protocols, it can be + * mapped differently. For example, it will likely be mapped to some exceptions + * in Java, but more likely mapped to some error codes in C. + * + * # Other uses + * + * The error model and the `Status` message can be used in a variety of + * environments, either with or without APIs, to provide a + * consistent developer experience across different environments. + * + * Example uses of this error model include: + * + * - Partial errors. If a service needs to return partial errors to the client, + * it may embed the `Status` in the normal response to indicate the partial + * errors. + * + * - Workflow errors. A typical workflow has multiple steps. Each step may + * have a `Status` message for error reporting. + * + * - Batch operations. 
If a client uses batch request and batch response, the + * `Status` message should be used directly inside batch response, one for + * each error sub-response. + * + * - Asynchronous operations. If an API call embeds asynchronous operation + * results in its response, the status of those operations should be + * represented directly using the `Status` message. + * + * - Logging. If some API errors are stored in logs, the message `Status` could + * be used directly after any stripping needed for security/privacy reasons. + * + * @property {number} code + * The status code, which should be an enum value of google.rpc.Code. + * + * @property {string} message + * A developer-facing error message, which should be in English. Any + * user-facing error message should be localized and sent in the + * google.rpc.Status.details field, or localized by the client. + * + * @property {Object[]} details + * A list of messages that carry the error details. There is a common set of + * message types for APIs to use. + * + * This object should have the same structure as [Any]{@link google.protobuf.Any} + * + * @typedef Status + * @memberof google.rpc + * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto} + */ +var Status = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_color.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_color.js new file mode 100644 index 00000000000..a2ea753d8a1 --- /dev/null +++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_color.js @@ -0,0 +1,164 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * Represents a color in the RGBA color space. This representation is designed + * for simplicity of conversion to/from color representations in various + * languages over compactness; for example, the fields of this representation + * can be trivially provided to the constructor of "java.awt.Color" in Java; it + * can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha" + * method in iOS; and, with just a little work, it can be easily formatted into + * a CSS "rgba()" string in JavaScript, as well. Here are some examples: + * + * Example (Java): + * + * import com.google.type.Color; + * + * // ... + * public static java.awt.Color fromProto(Color protocolor) { + * float alpha = protocolor.hasAlpha() + * ? 
protocolor.getAlpha().getValue()
+ *         : 1.0f;
+ *
+ *       return new java.awt.Color(
+ *           protocolor.getRed(),
+ *           protocolor.getGreen(),
+ *           protocolor.getBlue(),
+ *           alpha);
+ *     }
+ *
+ *     public static Color toProto(java.awt.Color color) {
+ *       float red = (float) color.getRed();
+ *       float green = (float) color.getGreen();
+ *       float blue = (float) color.getBlue();
+ *       float denominator = 255.0f;
+ *       Color.Builder resultBuilder =
+ *           Color
+ *               .newBuilder()
+ *               .setRed(red / denominator)
+ *               .setGreen(green / denominator)
+ *               .setBlue(blue / denominator);
+ *       int alpha = color.getAlpha();
+ *       if (alpha != 255) {
+ *         resultBuilder.setAlpha(
+ *             FloatValue
+ *                 .newBuilder()
+ *                 .setValue(((float) alpha) / denominator)
+ *                 .build());
+ *       }
+ *       return resultBuilder.build();
+ *     }
+ *     // ...
+ *
+ * Example (iOS / Obj-C):
+ *
+ *     // ...
+ *     static UIColor* fromProto(Color* protocolor) {
+ *       float red = [protocolor red];
+ *       float green = [protocolor green];
+ *       float blue = [protocolor blue];
+ *       FloatValue* alpha_wrapper = [protocolor alpha];
+ *       float alpha = 1.0;
+ *       if (alpha_wrapper != nil) {
+ *         alpha = [alpha_wrapper value];
+ *       }
+ *       return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
+ *     }
+ *
+ *     static Color* toProto(UIColor* color) {
+ *       CGFloat red, green, blue, alpha;
+ *       if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) {
+ *         return nil;
+ *       }
+ *       Color* result = [[Color alloc] init];
+ *       [result setRed:red];
+ *       [result setGreen:green];
+ *       [result setBlue:blue];
+ *       if (alpha <= 0.9999) {
+ *         [result setAlpha:floatWrapperWithValue(alpha)];
+ *       }
+ *       [result autorelease];
+ *       return result;
+ *     }
+ *     // ...
+ *
+ * Example (JavaScript):
+ *
+ *     // ...
+ *
+ *     var protoToCssColor = function(rgb_color) {
+ *       var redFrac = rgb_color.red || 0.0;
+ *       var greenFrac = rgb_color.green || 0.0;
+ *       var blueFrac = rgb_color.blue || 0.0;
+ *       var red = Math.floor(redFrac * 255);
+ *       var green = Math.floor(greenFrac * 255);
+ *       var blue = Math.floor(blueFrac * 255);
+ *
+ *       if (!('alpha' in rgb_color)) {
+ *         return rgbToCssColor_(red, green, blue);
+ *       }
+ *
+ *       var alphaFrac = rgb_color.alpha.value || 0.0;
+ *       var rgbParams = [red, green, blue].join(',');
+ *       return ['rgba(', rgbParams, ',', alphaFrac, ')'].join('');
+ *     };
+ *
+ *     var rgbToCssColor_ = function(red, green, blue) {
+ *       var rgbNumber = new Number((red << 16) | (green << 8) | blue);
+ *       var hexString = rgbNumber.toString(16);
+ *       var missingZeros = 6 - hexString.length;
+ *       var resultBuilder = ['#'];
+ *       for (var i = 0; i < missingZeros; i++) {
+ *         resultBuilder.push('0');
+ *       }
+ *       resultBuilder.push(hexString);
+ *       return resultBuilder.join('');
+ *     };
+ *
+ *     // ...
+ *
+ * @property {number} red
+ *   The amount of red in the color as a value in the interval [0, 1].
+ *
+ * @property {number} green
+ *   The amount of green in the color as a value in the interval [0, 1].
+ *
+ * @property {number} blue
+ *   The amount of blue in the color as a value in the interval [0, 1].
+ *
+ * @property {Object} alpha
+ *   The fraction of this color that should be applied to the pixel. That is,
+ *   the final pixel color is defined by the equation:
+ *
+ *     pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
+ *
+ *   This means that a value of 1.0 corresponds to a solid color, whereas
+ *   a value of 0.0 corresponds to a completely transparent color. This
+ *   uses a wrapper message rather than a simple float scalar so that it is
+ *   possible to distinguish between a default value and the value being unset.
+ * If omitted, this color object is to be rendered as a solid color + * (as if the alpha value had been explicitly given with a value of 1.0). + * + * This object should have the same structure as [FloatValue]{@link google.protobuf.FloatValue} + * + * @typedef Color + * @memberof google.type + * @see [google.type.Color definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/color.proto} + */ +var Color = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_latlng.js b/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_latlng.js new file mode 100644 index 00000000000..e54b84d63c7 --- /dev/null +++ b/packages/google-cloud-vision/src/v1p1beta1/doc/google/type/doc_latlng.js @@ -0,0 +1,71 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * An object representing a latitude/longitude pair. This is expressed as a pair + * of doubles representing degrees latitude and degrees longitude. Unless + * specified otherwise, this must conform to the + * WGS84 + * standard. Values must be within normalized ranges. + * + * Example of normalization code in Python: + * + * def NormalizeLongitude(longitude): + * """Wraps decimal degrees longitude to [-180.0, 180.0].""" + * q, r = divmod(longitude, 360.0) + * if r > 180.0 or (r == 180.0 and q <= -1.0): + * return r - 360.0 + * return r + * + * def NormalizeLatLng(latitude, longitude): + * """Wraps decimal degrees latitude and longitude to + * [-90.0, 90.0] and [-180.0, 180.0], respectively.""" + * r = latitude % 360.0 + * if r <= 90.0: + * return r, NormalizeLongitude(longitude) + * elif r >= 270.0: + * return r - 360, NormalizeLongitude(longitude) + * else: + * return 180 - r, NormalizeLongitude(longitude + 180.0) + * + * assert 180.0 == NormalizeLongitude(180.0) + * assert -180.0 == NormalizeLongitude(-180.0) + * assert -179.0 == NormalizeLongitude(181.0) + * assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0) + * assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0) + * assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0) + * assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0) + * assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0) + * assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0) + * assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0) + * assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0) + * assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0) + * assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0) + * + * @property {number} latitude + * The latitude in degrees. It must be in the range [-90.0, +90.0]. + * + * @property {number} longitude + * The longitude in degrees. It must be in the range [-180.0, +180.0]. 
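+ *
+ * As an illustrative sketch (field names taken from the messages above), two
+ * of these pairs could form the `LatLongRect` used by `ImageContext`:
+ *
+ *     const latLongRect = {
+ *       minLatLng: {latitude: 37.42, longitude: -122.09},
+ *       maxLatLng: {latitude: 37.43, longitude: -122.08},
+ *     };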
+ *
+ * @typedef LatLng
+ * @memberof google.type
+ * @see [google.type.LatLng definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/latlng.proto}
+ */
+var LatLng = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client.js b/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client.js
new file mode 100644
index 00000000000..cd731ef03bb
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client.js
@@ -0,0 +1,226 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const gapicConfig = require('./image_annotator_client_config');
+const gax = require('google-gax');
+const merge = require('lodash.merge');
+const path = require('path');
+
+const VERSION = require('../../package.json').version;
+
+/**
+ * Service that performs Google Cloud Vision API detection tasks over client
+ * images, such as face, landmark, logo, label, and text detection. The
+ * ImageAnnotator service returns detected entities from the images.
+ *
+ * @class
+ * @memberof v1p1beta1
+ */
+class ImageAnnotatorClient {
+  /**
+   * Construct an instance of ImageAnnotatorClient.
+   *
+   * @param {object} [options] - The configuration object. See the subsequent
+   *   parameters for more details.
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *   using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *   .p12 key downloaded from the Google Developers Console. If you provide
+   *   a path to a JSON file, the projectId option is not necessary.
+   *   NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *   the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *   Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *   the environment variable GCLOUD_PROJECT for your project ID. If your
+   *   app is running in an environment which supports
+   *   {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *   your project ID will be detected automatically.
+   * @param {function} [options.promise] - Custom promise module to use instead
+   *   of native Promises.
+   * @param {string} [options.servicePath] - The domain name of the
+   *   API remote host.
+   */
+  constructor(opts) {
+    this._descriptors = {};
+
+    // Ensure that options include the service address and port.
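+    // If the caller supplies neither, the static defaults defined further
+    // down this class are used: servicePath 'vision.googleapis.com' on
+    // port 443.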
+    opts = Object.assign(
+      {
+        clientConfig: {},
+        port: this.constructor.port,
+        servicePath: this.constructor.servicePath,
+      },
+      opts
+    );
+
+    // Create a `gaxGrpc` object, with any grpc-specific options
+    // sent to the client.
+    opts.scopes = this.constructor.scopes;
+    var gaxGrpc = gax.grpc(opts);
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = gaxGrpc.auth;
+
+    // Determine the client header string.
+    var clientHeader = [
+      `gl-node/${process.versions.node}`,
+      `grpc/${gaxGrpc.grpcVersion}`,
+      `gax/${gax.version}`,
+      `gapic/${VERSION}`,
+    ];
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    var protos = merge(
+      {},
+      gaxGrpc.loadProto(
+        path.join(__dirname, '..', '..', 'protos'),
+        'google/cloud/vision/v1p1beta1/image_annotator.proto'
+      )
+    );
+
+    // Put together the default options sent with requests.
+    var defaults = gaxGrpc.constructSettings(
+      'google.cloud.vision.v1p1beta1.ImageAnnotator',
+      gapicConfig,
+      opts.clientConfig,
+      {'x-goog-api-client': clientHeader.join(' ')}
+    );
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this._innerApiCalls = {};
+
+    // Put together the "service stub" for
+    // google.cloud.vision.v1p1beta1.ImageAnnotator.
+    var imageAnnotatorStub = gaxGrpc.createStub(
+      protos.google.cloud.vision.v1p1beta1.ImageAnnotator,
+      opts
+    );
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
+    var imageAnnotatorStubMethods = ['batchAnnotateImages'];
+    for (let methodName of imageAnnotatorStubMethods) {
+      this._innerApiCalls[methodName] = gax.createApiCall(
+        imageAnnotatorStub.then(
+          stub =>
+            function() {
+              var args = Array.prototype.slice.call(arguments, 0);
+              return stub[methodName].apply(stub, args);
+            }
+        ),
+        defaults[methodName],
+        null
+      );
+    }
+  }
+
+  /**
+   * The DNS address for this API service.
+   */
+  static get servicePath() {
+    return 'vision.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-vision',
+    ];
+  }
+
+  /**
+   * Return the project ID used by this class.
+   * @param {function(Error, string)} callback - the callback to
+   *   be called with the current project ID.
+   */
+  getProjectId(callback) {
+    return this.auth.getProjectId(callback);
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+  /**
+   * Run image detection and annotation for a batch of images.
+   *
+   * @param {Object} request
+   *   The request object that will be sent.
+   * @param {Object[]} request.requests
+   *   Individual image annotation requests for this batch.
+   *
+   *   This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1p1beta1.AnnotateImageRequest}
+   * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g., timeout,
+   *   retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
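+   *
+   *   By default (per image_annotator_client_config.json in this directory),
+   *   BatchAnnotateImages is treated as idempotent: DEADLINE_EXCEEDED and
+   *   UNAVAILABLE errors are retried with exponential backoff starting at
+   *   100 ms, growing by a factor of 1.3 per attempt, capped at a 60 s delay,
+   *   with an overall deadline of 600 s.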
+   * @param {function(?Error, ?Object)} [callback]
+   *   The function which will be called with the result of the API call.
+   *
+   *   The second parameter to the callback is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse}.
+   * @returns {Promise} - The promise which resolves to an array.
+   *   The first element of the array is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse}.
+   *   The promise has a method named "cancel" which cancels the ongoing API call.
+   *
+   * @example
+   *
+   * const vision = require('@google-cloud/vision');
+   *
+   * var client = new vision.v1p1beta1.ImageAnnotatorClient({
+   *   // optional auth parameters.
+   * });
+   *
+   * var requests = [];
+   * client.batchAnnotateImages({requests: requests})
+   *   .then(responses => {
+   *     var response = responses[0];
+   *     // doThingsWith(response)
+   *   })
+   *   .catch(err => {
+   *     console.error(err);
+   *   });
+   */
+  batchAnnotateImages(request, options, callback) {
+    if (options instanceof Function && callback === undefined) {
+      callback = options;
+      options = {};
+    }
+    options = options || {};
+
+    return this._innerApiCalls.batchAnnotateImages(request, options, callback);
+  }
+}
+
+module.exports = ImageAnnotatorClient;
diff --git a/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client_config.json b/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client_config.json
new file mode 100644
index 00000000000..55c365e3f80
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/image_annotator_client_config.json
@@ -0,0 +1,31 @@
+{
+  "interfaces": {
+    "google.cloud.vision.v1p1beta1.ImageAnnotator": {
+      "retry_codes": {
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ],
+        "non_idempotent": []
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1.0,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "BatchAnnotateImages": {
+          "timeout_millis": 60000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/packages/google-cloud-vision/src/v1p1beta1/index.js b/packages/google-cloud-vision/src/v1p1beta1/index.js
new file mode 100644
index 00000000000..a41489d4fd3
--- /dev/null
+++ b/packages/google-cloud-vision/src/v1p1beta1/index.js
@@ -0,0 +1,19 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const ImageAnnotatorClient = require('./image_annotator_client');
+
+module.exports.ImageAnnotatorClient = ImageAnnotatorClient;
diff --git a/packages/google-cloud-vision/test/gapic-v1p1beta1.js b/packages/google-cloud-vision/test/gapic-v1p1beta1.js
new file mode 100644
index 00000000000..404f21c8dfb
--- /dev/null
+++ b/packages/google-cloud-vision/test/gapic-v1p1beta1.js
@@ -0,0 +1,95 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const assert = require('assert');
+
+const visionModule = require('../src');
+
+var FAKE_STATUS_CODE = 1;
+var error = new Error();
+error.code = FAKE_STATUS_CODE;
+
+describe('ImageAnnotatorClient', () => {
+  describe('batchAnnotateImages', () => {
+    it('invokes batchAnnotateImages without error', done => {
+      var client = new visionModule.v1p1beta1.ImageAnnotatorClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+
+      // Mock request
+      var requests = [];
+      var request = {
+        requests: requests,
+      };
+
+      // Mock response
+      var expectedResponse = {};
+
+      // Mock Grpc layer
+      client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod(
+        request,
+        expectedResponse
+      );
+
+      client.batchAnnotateImages(request, (err, response) => {
+        assert.ifError(err);
+        assert.deepStrictEqual(response, expectedResponse);
+        done();
+      });
+    });
+
+    it('invokes batchAnnotateImages with error', done => {
+      var client = new visionModule.v1p1beta1.ImageAnnotatorClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+
+      // Mock request
+      var requests = [];
+      var request = {
+        requests: requests,
+      };
+
+      // Mock Grpc layer
+      client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod(
+        request,
+        null,
+        error
+      );
+
+      client.batchAnnotateImages(request, (err, response) => {
+        assert(err instanceof Error);
+        assert.equal(err.code, FAKE_STATUS_CODE);
+        assert(typeof response === 'undefined');
+        done();
+      });
+    });
+  });
+});
+
+function mockSimpleGrpcMethod(expectedRequest, response, error) {
+  return function(actualRequest, options, callback) {
+    assert.deepStrictEqual(actualRequest, expectedRequest);
+    if (error) {
+      callback(error);
+    } else if (response) {
+      callback(null, response);
+    } else {
+      callback(null);
+    }
+  };
+}
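+
+// Usage sketch (not part of the test suite): a minimal end-to-end call
+// against the live service. Assumptions for illustration only: credentials
+// are available via Application Default Credentials, and the image
+// gs://my-bucket/my-image.jpg exists.
+//
+//   const vision = require('@google-cloud/vision');
+//   const client = new vision.v1p1beta1.ImageAnnotatorClient();
+//   const request = {
+//     requests: [
+//       {
+//         image: {source: {imageUri: 'gs://my-bucket/my-image.jpg'}},
+//         features: [{type: 'LABEL_DETECTION', maxResults: 5}],
+//       },
+//     ],
+//   };
+//   client.batchAnnotateImages(request)
+//     .then(responses => {
+//       const labels = responses[0].responses[0].labelAnnotations;
+//       labels.forEach(label => console.log(label.description, label.score));
+//     })
+//     .catch(console.error);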