diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py index 50cd56e3c61c..db22d1d994e0 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py @@ -9,7 +9,7 @@ # regenerated. # -------------------------------------------------------------------------- -from msrest.service_client import ServiceClient +from msrest.service_client import SDKClient from msrest import Configuration, Serializer, Deserializer from .version import VERSION from msrest.pipeline import ClientRawResponse @@ -51,7 +51,7 @@ def __init__( self.credentials = credentials -class ComputerVisionAPI(object): +class ComputerVisionAPI(SDKClient): """The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively. :ivar config: Configuration for client. @@ -73,7 +73,7 @@ def __init__( self, azure_region, credentials): self.config = ComputerVisionAPIConfiguration(azure_region, credentials) - self._client = ServiceClient(self.config.credentials, self.config) + super(ComputerVisionAPI, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self.api_version = '1.0' @@ -103,7 +103,7 @@ def list_models( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/models' + url = self.list_models.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -135,6 +135,7 @@ def list_models( return client_raw_response return deserialized + list_models.metadata = {'url': '/models'} def analyze_image( self, url, visual_features=None, details=None, language="en", custom_headers=None, raw=False, **operation_config): @@ -144,7 +145,7 @@ def analyze_image( optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. - :param url: + :param url: Publicly reachable URL of an image :type url: str :param visual_features: A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual @@ -166,13 +167,12 @@ def analyze_image( in the image. :type details: list[str or ~azure.cognitiveservices.vision.computervision.models.Details] - :param language: A string indicating which language to return. The - service will return recognition results in specified language. If this + :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.zh - - Simplified Chinese. 
Possible values include: 'en', 'zh' - :type language: str or - ~azure.cognitiveservices.vision.computervision.models.Language1 + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -188,7 +188,7 @@ def analyze_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/analyze' + url = self.analyze_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -201,7 +201,7 @@ def analyze_image( if details is not None: query_parameters['details'] = self._serialize.query("details", details, '[Details]', div=',') if language is not None: - query_parameters['language'] = self._serialize.query("language", language, 'Language1') + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -230,6 +230,7 @@ def analyze_image( return client_raw_response return deserialized + analyze_image.metadata = {'url': '/analyze'} def generate_thumbnail( self, width, height, url, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config): @@ -248,7 +249,7 @@ def generate_thumbnail( :param height: Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. :type height: int - :param url: + :param url: Publicly reachable URL of an image :type url: str :param smart_cropping: Boolean flag for enabling smart cropping. :type smart_cropping: bool @@ -270,7 +271,7 @@ def generate_thumbnail( image_url = models.ImageUrl(url=url) # Construct URL - url = '/generateThumbnail' + url = self.generate_thumbnail.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -310,6 +311,7 @@ def generate_thumbnail( return client_raw_response return deserialized + generate_thumbnail.metadata = {'url': '/generateThumbnail'} def recognize_printed_text( self, url, detect_orientation=True, language="unk", custom_headers=None, raw=False, **operation_config): @@ -326,7 +328,7 @@ def recognize_printed_text( image orientation and correct it before further processing (e.g. if it's upside-down). :type detect_orientation: bool - :param url: + :param url: Publicly reachable URL of an image :type url: str :param language: The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: @@ -350,7 +352,7 @@ def recognize_printed_text( image_url = models.ImageUrl(url=url) # Construct URL - url = '/ocr' + url = self.recognize_printed_text.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -389,9 +391,10 @@ def recognize_printed_text( return client_raw_response return deserialized + recognize_printed_text.metadata = {'url': '/ocr'} def describe_image( - self, url, max_candidates="1", custom_headers=None, raw=False, **operation_config): + self, url, max_candidates="1", language="en", custom_headers=None, raw=False, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. 
The description is based on a collection of content tags, which are also returned by the operation. @@ -402,11 +405,17 @@ def describe_image( returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. - :param url: + :param url: Publicly reachable URL of an image :type url: str :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -422,7 +431,7 @@ def describe_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/describe' + url = self.describe_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -432,6 +441,8 @@ def describe_image( query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str') + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -460,20 +471,27 @@ def describe_image( return client_raw_response return deserialized + describe_image.metadata = {'url': '/describe'} def tag_image( - self, url, custom_headers=None, raw=False, **operation_config): + self, url, language="en", custom_headers=None, raw=False, **operation_config): """This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for - example the tag “cello” may be accompanied by the hint “musical - instrument”. All tags are in English. + example the tag 'cello' may be accompanied by the hint 'musical + instrument'. All tags are in English. - :param url: + :param url: Publicly reachable URL of an image :type url: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -489,7 +507,7 @@ def tag_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/tag' + url = self.tag_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -497,6 +515,8 @@ def tag_image( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -525,6 +545,7 @@ def tag_image( return client_raw_response return deserialized + tag_image.metadata = {'url': '/tag'} def analyze_image_by_domain( self, model, url, custom_headers=None, raw=False, **operation_config): @@ -541,7 +562,7 @@ def analyze_image_by_domain( values include: 'Celebrities', 'Landmarks' :type model: str or ~azure.cognitiveservices.vision.computervision.models.DomainModels - :param url: + :param url: Publicly reachable URL of an image :type url: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -558,7 +579,7 @@ def analyze_image_by_domain( image_url = models.ImageUrl(url=url) # Construct URL - url = '/models/{model}/analyze' + url = self.analyze_image_by_domain.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'model': self._serialize.url("model", model, 'DomainModels') @@ -595,18 +616,19 @@ def analyze_image_by_domain( return client_raw_response return deserialized + analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'} def recognize_text( self, url, detect_handwriting=False, custom_headers=None, raw=False, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, - the response contains a field called “Operation-Location”. The - “Operation-Location” field contains the URL that you must use for your + the response contains a field called 'Operation-Location'. The + 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. - :param url: + :param url: Publicly reachable URL of an image :type url: str - :param detect_handwriting: If “true” is specified, handwriting - recognition is performed. If this parameter is set to “false” or is + :param detect_handwriting: If 'true' is specified, handwriting + recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. 
:type detect_handwriting: bool :param dict custom_headers: headers that will be added to the request @@ -622,7 +644,7 @@ def recognize_text( image_url = models.ImageUrl(url=url) # Construct URL - url = '/recognizeText' + url = self.recognize_text.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -656,6 +678,7 @@ def recognize_text( 'Operation-Location': 'str', }) return client_raw_response + recognize_text.metadata = {'url': '/recognizeText'} def get_text_operation_result( self, operation_id, custom_headers=None, raw=False, **operation_config): @@ -679,7 +702,7 @@ def get_text_operation_result( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/textOperations/{operationId}' + url = self.get_text_operation_result.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'operationId': self._serialize.url("operation_id", operation_id, 'str') @@ -712,6 +735,7 @@ def get_text_operation_result( return client_raw_response return deserialized + get_text_operation_result.metadata = {'url': '/textOperations/{operationId}'} def analyze_image_in_stream( self, image, visual_features=None, details=None, language="en", custom_headers=None, raw=False, callback=None, **operation_config): @@ -739,11 +763,11 @@ def analyze_image_in_stream( feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' :type details: str - :param language: A string indicating which language to return. The - service will return recognition results in specified language. If this + :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.zh - - Simplified Chinese. Possible values include: 'en', 'zh' + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -763,7 +787,7 @@ def analyze_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/analyze' + url = self.analyze_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -805,6 +829,7 @@ def analyze_image_in_stream( return client_raw_response return deserialized + analyze_image_in_stream.metadata = {'url': '/analyze'} def generate_thumbnail_in_stream( self, width, height, image, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config): @@ -840,10 +865,10 @@ def generate_thumbnail_in_stream( :return: object or ClientRawResponse if raw=true :rtype: Generator or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ComputerVisionErrorException` + :class:`HttpOperationError` """ # Construct URL - url = '/generateThumbnail' + url = self.generate_thumbnail_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -871,7 +896,7 @@ def generate_thumbnail_in_stream( request, header_parameters, body_content, stream=True, **operation_config) if response.status_code not in [200]: - raise models.ComputerVisionErrorException(self._deserialize, response) + raise HttpOperationError(self._deserialize, response) deserialized = None @@ -883,6 +908,7 @@ def generate_thumbnail_in_stream( return client_raw_response return deserialized + generate_thumbnail_in_stream.metadata = {'url': '/generateThumbnail'} def recognize_printed_text_in_stream( self, image, detect_orientation=True, language="unk", custom_headers=None, raw=False, callback=None, **operation_config): @@ -926,7 +952,7 @@ def recognize_printed_text_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/ocr' + url = self.recognize_printed_text_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -965,9 +991,10 @@ def recognize_printed_text_in_stream( return client_raw_response return deserialized + recognize_printed_text_in_stream.metadata = {'url': '/ocr'} def describe_image_in_stream( - self, image, max_candidates="1", custom_headers=None, raw=False, callback=None, **operation_config): + self, image, max_candidates="1", language="en", custom_headers=None, raw=False, callback=None, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. @@ -983,6 +1010,12 @@ def describe_image_in_stream( :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1001,7 +1034,7 @@ def describe_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/describe' + url = self.describe_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1011,6 +1044,8 @@ def describe_image_in_stream( query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str') + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -1039,6 +1074,7 @@ def describe_image_in_stream( return client_raw_response return deserialized + describe_image_in_stream.metadata = {'url': '/describe'} def tag_image_in_stream( self, image, custom_headers=None, raw=False, callback=None, **operation_config): @@ -1048,8 +1084,8 @@ def tag_image_in_stream( images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for - example the tag “cello” may be accompanied by the hint “musical - instrument”. All tags are in English. + example the tag 'cello' may be accompanied by the hint 'musical + instrument'. All tags are in English. :param image: An image stream. :type image: Generator @@ -1071,7 +1107,7 @@ def tag_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/tag' + url = self.tag_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1107,6 +1143,7 @@ def tag_image_in_stream( return client_raw_response return deserialized + tag_image_in_stream.metadata = {'url': '/tag'} def analyze_image_by_domain_in_stream( self, model, image, custom_headers=None, raw=False, callback=None, **operation_config): @@ -1141,7 +1178,7 @@ def analyze_image_by_domain_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/models/{model}/analyze' + url = self.analyze_image_by_domain_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'model': self._serialize.url("model", model, 'str') @@ -1178,18 +1215,19 @@ def analyze_image_by_domain_in_stream( return client_raw_response return deserialized + analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'} def recognize_text_in_stream( self, image, detect_handwriting=False, custom_headers=None, raw=False, callback=None, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, - the response contains a field called “Operation-Location”. The - “Operation-Location” field contains the URL that you must use for your + the response contains a field called 'Operation-Location'. The + 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. :param image: An image stream. 
:type image: Generator - :param detect_handwriting: If “true” is specified, handwriting - recognition is performed. If this parameter is set to “false” or is + :param detect_handwriting: If 'true' is specified, handwriting + recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. :type detect_handwriting: bool :param dict custom_headers: headers that will be added to the request @@ -1208,7 +1246,7 @@ def recognize_text_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/recognizeText' + url = self.recognize_text_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1242,3 +1280,4 @@ def recognize_text_in_stream( 'Operation-Location': 'str', }) return client_raw_response + recognize_text_in_stream.metadata = {'url': '/recognizeText'} diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py index 8436045029f8..663008ab4f8c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py @@ -9,42 +9,72 @@ # regenerated. # -------------------------------------------------------------------------- -from .word import Word -from .line import Line -from .recognition_result import RecognitionResult -from .text_operation_result import TextOperationResult -from .face_rectangle import FaceRectangle -from .celebrities_model import CelebritiesModel -from .category_detail import CategoryDetail -from .category import Category -from .adult_info import AdultInfo -from .color_info import ColorInfo -from .image_type import ImageType -from .image_tag import ImageTag -from .image_caption import ImageCaption -from .image_metadata import ImageMetadata -from .image_description_details import ImageDescriptionDetails -from .face_description import FaceDescription -from .image_analysis import ImageAnalysis -from .ocr_word import OcrWord -from .ocr_line import OcrLine -from .ocr_region import OcrRegion -from .ocr_result import OcrResult -from .model_description import ModelDescription -from .list_models_result import ListModelsResult -from .domain_model_results import DomainModelResults -from .image_description import ImageDescription -from .tag_result import TagResult -from .computer_vision_error import ComputerVisionError, ComputerVisionErrorException -from .image_url import ImageUrl +try: + from .word_py3 import Word + from .line_py3 import Line + from .recognition_result_py3 import RecognitionResult + from .text_operation_result_py3 import TextOperationResult + from .face_rectangle_py3 import FaceRectangle + from .celebrities_model_py3 import CelebritiesModel + from .category_detail_py3 import CategoryDetail + from .category_py3 import Category + from .adult_info_py3 import AdultInfo + from .color_info_py3 import ColorInfo + from .image_type_py3 import ImageType + from .image_tag_py3 import ImageTag + from .image_caption_py3 import ImageCaption + from .image_metadata_py3 import ImageMetadata + from .image_description_details_py3 import ImageDescriptionDetails + from .face_description_py3 import FaceDescription + from .image_analysis_py3 import 
ImageAnalysis + from .ocr_word_py3 import OcrWord + from .ocr_line_py3 import OcrLine + from .ocr_region_py3 import OcrRegion + from .ocr_result_py3 import OcrResult + from .model_description_py3 import ModelDescription + from .list_models_result_py3 import ListModelsResult + from .domain_model_results_py3 import DomainModelResults + from .image_description_py3 import ImageDescription + from .tag_result_py3 import TagResult + from .computer_vision_error_py3 import ComputerVisionError, ComputerVisionErrorException + from .image_url_py3 import ImageUrl +except (SyntaxError, ImportError): + from .word import Word + from .line import Line + from .recognition_result import RecognitionResult + from .text_operation_result import TextOperationResult + from .face_rectangle import FaceRectangle + from .celebrities_model import CelebritiesModel + from .category_detail import CategoryDetail + from .category import Category + from .adult_info import AdultInfo + from .color_info import ColorInfo + from .image_type import ImageType + from .image_tag import ImageTag + from .image_caption import ImageCaption + from .image_metadata import ImageMetadata + from .image_description_details import ImageDescriptionDetails + from .face_description import FaceDescription + from .image_analysis import ImageAnalysis + from .ocr_word import OcrWord + from .ocr_line import OcrLine + from .ocr_region import OcrRegion + from .ocr_result import OcrResult + from .model_description import ModelDescription + from .list_models_result import ListModelsResult + from .domain_model_results import DomainModelResults + from .image_description import ImageDescription + from .tag_result import TagResult + from .computer_vision_error import ComputerVisionError, ComputerVisionErrorException + from .image_url import ImageUrl from .computer_vision_api_enums import ( TextOperationStatusCodes, + Gender, ComputerVisionErrorCodes, VisualFeatureTypes, OcrLanguages, AzureRegions, Details, - Language1, DomainModels, ) @@ -78,11 +108,11 @@ 'ComputerVisionError', 'ComputerVisionErrorException', 'ImageUrl', 'TextOperationStatusCodes', + 'Gender', 'ComputerVisionErrorCodes', 'VisualFeatureTypes', 'OcrLanguages', 'AzureRegions', 'Details', - 'Language1', 'DomainModels', ] diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py index 37c70ec08dd2..aa674936a787 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py @@ -36,9 +36,9 @@ class AdultInfo(Model): 'racy_score': {'key': 'racyScore', 'type': 'float'}, } - def __init__(self, is_adult_content=None, is_racy_content=None, adult_score=None, racy_score=None): - super(AdultInfo, self).__init__() - self.is_adult_content = is_adult_content - self.is_racy_content = is_racy_content - self.adult_score = adult_score - self.racy_score = racy_score + def __init__(self, **kwargs): + super(AdultInfo, self).__init__(**kwargs) + self.is_adult_content = kwargs.get('is_adult_content', None) + self.is_racy_content = kwargs.get('is_racy_content', None) + self.adult_score = kwargs.get('adult_score', None) + self.racy_score = kwargs.get('racy_score', None) diff --git 
a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py new file mode 100644 index 000000000000..9c28b8351a4a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AdultInfo(Model): + """An object describing whether the image contains adult-oriented content + and/or is racy. + + :param is_adult_content: A value indicating if the image contains + adult-oriented content. + :type is_adult_content: bool + :param is_racy_content: A value indicating if the image is race. + :type is_racy_content: bool + :param adult_score: Score from 0 to 1 that indicates how much of adult + content is within the image. + :type adult_score: float + :param racy_score: Score from 0 to 1 that indicates how suggestive is the + image. + :type racy_score: float + """ + + _attribute_map = { + 'is_adult_content': {'key': 'isAdultContent', 'type': 'bool'}, + 'is_racy_content': {'key': 'isRacyContent', 'type': 'bool'}, + 'adult_score': {'key': 'adultScore', 'type': 'float'}, + 'racy_score': {'key': 'racyScore', 'type': 'float'}, + } + + def __init__(self, *, is_adult_content: bool=None, is_racy_content: bool=None, adult_score: float=None, racy_score: float=None, **kwargs) -> None: + super(AdultInfo, self).__init__(**kwargs) + self.is_adult_content = is_adult_content + self.is_racy_content = is_racy_content + self.adult_score = adult_score + self.racy_score = racy_score diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py index 1a3e5cd8fe0d..234f283b0d5c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py @@ -19,7 +19,7 @@ class Category(Model): :type name: str :param score: Scoring of the category. :type score: float - :param detail: Additional category detail if available. 
+ :param detail: :type detail: ~azure.cognitiveservices.vision.computervision.models.CategoryDetail """ @@ -30,8 +30,8 @@ class Category(Model): 'detail': {'key': 'detail', 'type': 'CategoryDetail'}, } - def __init__(self, name=None, score=None, detail=None): - super(Category, self).__init__() - self.name = name - self.score = score - self.detail = detail + def __init__(self, **kwargs): + super(Category, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.score = kwargs.get('score', None) + self.detail = kwargs.get('detail', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py index 451ba60464be..eefbe5b691dc 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py @@ -24,6 +24,6 @@ class CategoryDetail(Model): 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, } - def __init__(self, celebrities=None): - super(CategoryDetail, self).__init__() - self.celebrities = celebrities + def __init__(self, **kwargs): + super(CategoryDetail, self).__init__(**kwargs) + self.celebrities = kwargs.get('celebrities', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py new file mode 100644 index 000000000000..b155197dab02 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CategoryDetail(Model): + """An object describing additional category details. + + :param celebrities: An array of celebrities if any identified. + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + """ + + _attribute_map = { + 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + } + + def __init__(self, *, celebrities=None, **kwargs) -> None: + super(CategoryDetail, self).__init__(**kwargs) + self.celebrities = celebrities diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py new file mode 100644 index 000000000000..e6c0c0485cc7 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Category(Model): + """An object describing identified category. + + :param name: Name of the category. + :type name: str + :param score: Scoring of the category. + :type score: float + :param detail: + :type detail: + ~azure.cognitiveservices.vision.computervision.models.CategoryDetail + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'score': {'key': 'score', 'type': 'float'}, + 'detail': {'key': 'detail', 'type': 'CategoryDetail'}, + } + + def __init__(self, *, name: str=None, score: float=None, detail=None, **kwargs) -> None: + super(Category, self).__init__(**kwargs) + self.name = name + self.score = score + self.detail = detail diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py index 4f1db214ffcb..bb6db51def40 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py @@ -30,8 +30,8 @@ class CelebritiesModel(Model): 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, } - def __init__(self, name=None, confidence=None, face_rectangle=None): - super(CelebritiesModel, self).__init__() - self.name = name - self.confidence = confidence - self.face_rectangle = face_rectangle + def __init__(self, **kwargs): + super(CelebritiesModel, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.confidence = kwargs.get('confidence', None) + self.face_rectangle = kwargs.get('face_rectangle', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py new file mode 100644 index 000000000000..d50e338a979a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CelebritiesModel(Model): + """An object describing possible celebrity identification. + + :param name: Name of the celebrity. + :type name: str + :param confidence: Level of confidence ranging from 0 to 1. 
+ :type confidence: float + :param face_rectangle: + :type face_rectangle: + ~azure.cognitiveservices.vision.computervision.models.FaceRectangle + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + } + + def __init__(self, *, name: str=None, confidence: float=None, face_rectangle=None, **kwargs) -> None: + super(CelebritiesModel, self).__init__(**kwargs) + self.name = name + self.confidence = confidence + self.face_rectangle = face_rectangle diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py index 5425d3ec6315..60d583ee8340 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py @@ -35,10 +35,10 @@ class ColorInfo(Model): 'is_bw_img': {'key': 'isBWImg', 'type': 'bool'}, } - def __init__(self, dominant_color_foreground=None, dominant_color_background=None, dominant_colors=None, accent_color=None, is_bw_img=None): - super(ColorInfo, self).__init__() - self.dominant_color_foreground = dominant_color_foreground - self.dominant_color_background = dominant_color_background - self.dominant_colors = dominant_colors - self.accent_color = accent_color - self.is_bw_img = is_bw_img + def __init__(self, **kwargs): + super(ColorInfo, self).__init__(**kwargs) + self.dominant_color_foreground = kwargs.get('dominant_color_foreground', None) + self.dominant_color_background = kwargs.get('dominant_color_background', None) + self.dominant_colors = kwargs.get('dominant_colors', None) + self.accent_color = kwargs.get('accent_color', None) + self.is_bw_img = kwargs.get('is_bw_img', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py new file mode 100644 index 000000000000..c4320d353850 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ColorInfo(Model): + """An object providing additional metadata describing color attributes. + + :param dominant_color_foreground: Possible dominant foreground color. + :type dominant_color_foreground: str + :param dominant_color_background: Possible dominant background color. + :type dominant_color_background: str + :param dominant_colors: An array of possible dominant colors. + :type dominant_colors: list[str] + :param accent_color: Possible accent color. 
+ :type accent_color: str + :param is_bw_img: A value indicating if the image is black and white. + :type is_bw_img: bool + """ + + _attribute_map = { + 'dominant_color_foreground': {'key': 'dominantColorForeground', 'type': 'str'}, + 'dominant_color_background': {'key': 'dominantColorBackground', 'type': 'str'}, + 'dominant_colors': {'key': 'dominantColors', 'type': '[str]'}, + 'accent_color': {'key': 'accentColor', 'type': 'str'}, + 'is_bw_img': {'key': 'isBWImg', 'type': 'bool'}, + } + + def __init__(self, *, dominant_color_foreground: str=None, dominant_color_background: str=None, dominant_colors=None, accent_color: str=None, is_bw_img: bool=None, **kwargs) -> None: + super(ColorInfo, self).__init__(**kwargs) + self.dominant_color_foreground = dominant_color_foreground + self.dominant_color_background = dominant_color_background + self.dominant_colors = dominant_colors + self.accent_color = accent_color + self.is_bw_img = is_bw_img diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py index 81f543b7151b..e8ce61ec14bb 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py @@ -12,7 +12,7 @@ from enum import Enum -class TextOperationStatusCodes(Enum): +class TextOperationStatusCodes(str, Enum): not_started = "Not Started" running = "Running" @@ -20,7 +20,13 @@ class TextOperationStatusCodes(Enum): succeeded = "Succeeded" -class ComputerVisionErrorCodes(Enum): +class Gender(str, Enum): + + male = "Male" + female = "Female" + + +class ComputerVisionErrorCodes(str, Enum): invalid_image_url = "InvalidImageUrl" invalid_image_format = "InvalidImageFormat" @@ -37,7 +43,7 @@ class ComputerVisionErrorCodes(Enum): storage_exception = "StorageException" -class VisualFeatureTypes(Enum): +class VisualFeatureTypes(str, Enum): image_type = "ImageType" faces = "Faces" @@ -48,7 +54,7 @@ class VisualFeatureTypes(Enum): description = "Description" -class OcrLanguages(Enum): +class OcrLanguages(str, Enum): unk = "unk" zh_hans = "zh-Hans" @@ -79,7 +85,7 @@ class OcrLanguages(Enum): sk = "sk" -class AzureRegions(Enum): +class AzureRegions(str, Enum): westus = "westus" westeurope = "westeurope" @@ -95,19 +101,13 @@ class AzureRegions(Enum): brazilsouth = "brazilsouth" -class Details(Enum): +class Details(str, Enum): celebrities = "Celebrities" landmarks = "Landmarks" -class Language1(Enum): - - en = "en" - zh = "zh" - - -class DomainModels(Enum): +class DomainModels(str, Enum): celebrities = "Celebrities" landmarks = "Landmarks" diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py index 651725041655..4350fe0694ab 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py @@ -16,14 +16,17 @@ class ComputerVisionError(Model): """ComputerVisionError. 
- :param code: The error code. Possible values include: 'InvalidImageUrl', - 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', - 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', - 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', - 'Unspecified', 'StorageException' + All required parameters must be populated in order to send to Azure. + + :param code: Required. The error code. Possible values include: + 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', + 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', + 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', + 'InternalServerError', 'Unspecified', 'StorageException' :type code: str or ~azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorCodes - :param message: A message explaining the error reported by the service. + :param message: Required. A message explaining the error reported by the + service. :type message: str :param request_id: A unique request identifier. :type request_id: str @@ -40,11 +43,11 @@ class ComputerVisionError(Model): 'request_id': {'key': 'requestId', 'type': 'str'}, } - def __init__(self, code, message, request_id=None): - super(ComputerVisionError, self).__init__() - self.code = code - self.message = message - self.request_id = request_id + def __init__(self, **kwargs): + super(ComputerVisionError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.request_id = kwargs.get('request_id', None) class ComputerVisionErrorException(HttpOperationError): diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py new file mode 100644 index 000000000000..b4112ddba880 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class ComputerVisionError(Model): + """ComputerVisionError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. The error code. Possible values include: + 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', + 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', + 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', + 'InternalServerError', 'Unspecified', 'StorageException' + :type code: str or + ~azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorCodes + :param message: Required. A message explaining the error reported by the + service. + :type message: str + :param request_id: A unique request identifier. 
+ :type request_id: str + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'ComputerVisionErrorCodes'}, + 'message': {'key': 'message', 'type': 'str'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + } + + def __init__(self, *, code, message: str, request_id: str=None, **kwargs) -> None: + super(ComputerVisionError, self).__init__(**kwargs) + self.code = code + self.message = message + self.request_id = request_id + + +class ComputerVisionErrorException(HttpOperationError): + """Server responsed with exception of type: 'ComputerVisionError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(ComputerVisionErrorException, self).__init__(deserialize, response, 'ComputerVisionError', *args) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py index bc241232c803..4093d33791b5 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py @@ -22,7 +22,7 @@ class DomainModelResults(Model): list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] :param request_id: Id of the REST API request. :type request_id: str - :param metadata: Additional image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -33,8 +33,8 @@ class DomainModelResults(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, celebrities=None, request_id=None, metadata=None): - super(DomainModelResults, self).__init__() - self.celebrities = celebrities - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(DomainModelResults, self).__init__(**kwargs) + self.celebrities = kwargs.get('celebrities', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py new file mode 100644 index 000000000000..ea639d2d2fc8 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DomainModelResults(Model): + """Result of image analysis using a specific domain model including additional + metadata. 
+ + :param celebrities: An array of possible celebritied identified in the + image. + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'celebrities': {'key': 'result.celebrities', 'type': '[CelebritiesModel]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, celebrities=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(DomainModelResults, self).__init__(**kwargs) + self.celebrities = celebrities + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py index bf8f36245e08..383a6ecd9576 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py @@ -20,7 +20,7 @@ class FaceDescription(Model): :param gender: Possible gender of the face. Possible values include: 'Male', 'Female' :type gender: str or - ~azure.cognitiveservices.vision.computervision.models.enum + ~azure.cognitiveservices.vision.computervision.models.Gender :param face_rectangle: :type face_rectangle: ~azure.cognitiveservices.vision.computervision.models.FaceRectangle @@ -28,12 +28,12 @@ class FaceDescription(Model): _attribute_map = { 'age': {'key': 'age', 'type': 'int'}, - 'gender': {'key': 'gender', 'type': 'str'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, } - def __init__(self, age=None, gender=None, face_rectangle=None): - super(FaceDescription, self).__init__() - self.age = age - self.gender = gender - self.face_rectangle = face_rectangle + def __init__(self, **kwargs): + super(FaceDescription, self).__init__(**kwargs) + self.age = kwargs.get('age', None) + self.gender = kwargs.get('gender', None) + self.face_rectangle = kwargs.get('face_rectangle', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py new file mode 100644 index 000000000000..63e9e941714c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FaceDescription(Model): + """An object describing a face identified in the image. 
+ + :param age: Possible age of the face. + :type age: int + :param gender: Possible gender of the face. Possible values include: + 'Male', 'Female' + :type gender: str or + ~azure.cognitiveservices.vision.computervision.models.Gender + :param face_rectangle: + :type face_rectangle: + ~azure.cognitiveservices.vision.computervision.models.FaceRectangle + """ + + _attribute_map = { + 'age': {'key': 'age', 'type': 'int'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + } + + def __init__(self, *, age: int=None, gender=None, face_rectangle=None, **kwargs) -> None: + super(FaceDescription, self).__init__(**kwargs) + self.age = age + self.gender = gender + self.face_rectangle = face_rectangle diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py index 6edf8f06acbb..566ecc6100d4 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py @@ -32,9 +32,9 @@ class FaceRectangle(Model): 'height': {'key': 'height', 'type': 'int'}, } - def __init__(self, left=None, top=None, width=None, height=None): - super(FaceRectangle, self).__init__() - self.left = left - self.top = top - self.width = width - self.height = height + def __init__(self, **kwargs): + super(FaceRectangle, self).__init__(**kwargs) + self.left = kwargs.get('left', None) + self.top = kwargs.get('top', None) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py new file mode 100644 index 000000000000..aa8b0daaff59 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FaceRectangle(Model): + """An object describing face rectangle. + + :param left: X-coordinate of the top left point of the face. + :type left: int + :param top: Y-coordinate of the top left point of the face. + :type top: int + :param width: Width measured from the top-left point of the face. + :type width: int + :param height: Height measured from the top-left point of the face. 
+ :type height: int + """ + + _attribute_map = { + 'left': {'key': 'left', 'type': 'int'}, + 'top': {'key': 'top', 'type': 'int'}, + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + } + + def __init__(self, *, left: int=None, top: int=None, width: int=None, height: int=None, **kwargs) -> None: + super(FaceRectangle, self).__init__(**kwargs) + self.left = left + self.top = top + self.width = width + self.height = height diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py index 5bf9320a7d26..5d7e3c308e43 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py @@ -18,21 +18,19 @@ class ImageAnalysis(Model): :param categories: An array indicating identified categories. :type categories: list[~azure.cognitiveservices.vision.computervision.models.Category] - :param adult: A property scoring on whether the image is adult-oriented - and/or racy. + :param adult: :type adult: ~azure.cognitiveservices.vision.computervision.models.AdultInfo - :param color: A property scoring on color spectrums. + :param color: :type color: ~azure.cognitiveservices.vision.computervision.models.ColorInfo - :param image_type: A property indicating type of image (whether it's - clipart or line drawing) + :param image_type: :type image_type: ~azure.cognitiveservices.vision.computervision.models.ImageType :param tags: A list of tags with confidence level. :type tags: list[~azure.cognitiveservices.vision.computervision.models.ImageTag] - :param description: Description of the image. + :param description: :type description: ~azure.cognitiveservices.vision.computervision.models.ImageDescriptionDetails :param faces: An array of possible faces within the image. @@ -40,7 +38,7 @@ class ImageAnalysis(Model): list[~azure.cognitiveservices.vision.computervision.models.FaceDescription] :param request_id: Id of the request for tracking purposes. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -57,14 +55,14 @@ class ImageAnalysis(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, categories=None, adult=None, color=None, image_type=None, tags=None, description=None, faces=None, request_id=None, metadata=None): - super(ImageAnalysis, self).__init__() - self.categories = categories - self.adult = adult - self.color = color - self.image_type = image_type - self.tags = tags - self.description = description - self.faces = faces - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageAnalysis, self).__init__(**kwargs) + self.categories = kwargs.get('categories', None) + self.adult = kwargs.get('adult', None) + self.color = kwargs.get('color', None) + self.image_type = kwargs.get('image_type', None) + self.tags = kwargs.get('tags', None) + self.description = kwargs.get('description', None) + self.faces = kwargs.get('faces', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py new file mode 100644 index 000000000000..45eaf49c71d8 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageAnalysis(Model): + """Result of AnalyzeImage operation. + + :param categories: An array indicating identified categories. + :type categories: + list[~azure.cognitiveservices.vision.computervision.models.Category] + :param adult: + :type adult: + ~azure.cognitiveservices.vision.computervision.models.AdultInfo + :param color: + :type color: + ~azure.cognitiveservices.vision.computervision.models.ColorInfo + :param image_type: + :type image_type: + ~azure.cognitiveservices.vision.computervision.models.ImageType + :param tags: A list of tags with confidence level. + :type tags: + list[~azure.cognitiveservices.vision.computervision.models.ImageTag] + :param description: + :type description: + ~azure.cognitiveservices.vision.computervision.models.ImageDescriptionDetails + :param faces: An array of possible faces within the image. + :type faces: + list[~azure.cognitiveservices.vision.computervision.models.FaceDescription] + :param request_id: Id of the request for tracking purposes. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'categories': {'key': 'categories', 'type': '[Category]'}, + 'adult': {'key': 'adult', 'type': 'AdultInfo'}, + 'color': {'key': 'color', 'type': 'ColorInfo'}, + 'image_type': {'key': 'imageType', 'type': 'ImageType'}, + 'tags': {'key': 'tags', 'type': '[ImageTag]'}, + 'description': {'key': 'description', 'type': 'ImageDescriptionDetails'}, + 'faces': {'key': 'faces', 'type': '[FaceDescription]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, categories=None, adult=None, color=None, image_type=None, tags=None, description=None, faces=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageAnalysis, self).__init__(**kwargs) + self.categories = categories + self.adult = adult + self.color = color + self.image_type = image_type + self.tags = tags + self.description = description + self.faces = faces + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py index 448226f12d93..ec9aa4b93f80 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py @@ -26,7 +26,7 @@ class ImageCaption(Model): 'confidence': {'key': 'confidence', 'type': 'float'}, } - def __init__(self, text=None, confidence=None): - super(ImageCaption, self).__init__() - self.text = text - self.confidence = confidence + def __init__(self, **kwargs): + super(ImageCaption, self).__init__(**kwargs) + self.text = kwargs.get('text', None) + self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py new file mode 100644 index 000000000000..782f89cda23e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageCaption(Model): + """An image caption, i.e. a brief description of what the image depicts. 
+ + :param text: The text of the caption + :type text: str + :param confidence: The level of confidence the service has in the caption + :type confidence: float + """ + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, text: str=None, confidence: float=None, **kwargs) -> None: + super(ImageCaption, self).__init__(**kwargs) + self.text = text + self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py index c9f0d374a8b3..6a84422b6b65 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py @@ -23,7 +23,7 @@ class ImageDescription(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] :param request_id: Id of the REST API request. :type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -35,9 +35,9 @@ class ImageDescription(Model): 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, captions=None, request_id=None, metadata=None): - super(ImageDescription, self).__init__() - self.tags = tags - self.captions = captions - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageDescription, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.captions = kwargs.get('captions', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py index f8ada2c66c82..1e6afbb99ed7 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py @@ -23,7 +23,7 @@ class ImageDescriptionDetails(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] :param request_id: Id of the REST API request. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -35,9 +35,9 @@ class ImageDescriptionDetails(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, captions=None, request_id=None, metadata=None): - super(ImageDescriptionDetails, self).__init__() - self.tags = tags - self.captions = captions - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageDescriptionDetails, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.captions = kwargs.get('captions', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py new file mode 100644 index 000000000000..702d4ac029de --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageDescriptionDetails(Model): + """A collection of content tags, along with a list of captions sorted by + confidence level, and image metadata. + + :param tags: A collection of image tags. + :type tags: list[str] + :param captions: A list of captions, sorted by confidence level. + :type captions: + list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '[str]'}, + 'captions': {'key': 'captions', 'type': '[ImageCaption]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageDescriptionDetails, self).__init__(**kwargs) + self.tags = tags + self.captions = captions + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py new file mode 100644 index 000000000000..3ec3fa9c951a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageDescription(Model): + """A collection of content tags, along with a list of captions sorted by + confidence level, and image metadata. + + :param tags: A collection of image tags. + :type tags: list[str] + :param captions: A list of captions, sorted by confidence level. + :type captions: + list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'description.tags', 'type': '[str]'}, + 'captions': {'key': 'description.captions', 'type': '[ImageCaption]'}, + 'request_id': {'key': 'description.requestId', 'type': 'str'}, + 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageDescription, self).__init__(**kwargs) + self.tags = tags + self.captions = captions + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py index baa2a46a8dd2..797206379282 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py @@ -29,8 +29,8 @@ class ImageMetadata(Model): 'format': {'key': 'format', 'type': 'str'}, } - def __init__(self, width=None, height=None, format=None): - super(ImageMetadata, self).__init__() - self.width = width - self.height = height - self.format = format + def __init__(self, **kwargs): + super(ImageMetadata, self).__init__(**kwargs) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + self.format = kwargs.get('format', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py new file mode 100644 index 000000000000..2d6bb256c481 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageMetadata(Model): + """Image metadata. 
+ + :param width: Image width + :type width: int + :param height: Image height + :type height: int + :param format: Image format + :type format: str + """ + + _attribute_map = { + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + 'format': {'key': 'format', 'type': 'str'}, + } + + def __init__(self, *, width: int=None, height: int=None, format: str=None, **kwargs) -> None: + super(ImageMetadata, self).__init__(**kwargs) + self.width = width + self.height = height + self.format = format diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py index 14524a29143e..93f349e9d4c0 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py @@ -26,7 +26,7 @@ class ImageTag(Model): 'confidence': {'key': 'confidence', 'type': 'float'}, } - def __init__(self, name=None, confidence=None): - super(ImageTag, self).__init__() - self.name = name - self.confidence = confidence + def __init__(self, **kwargs): + super(ImageTag, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py new file mode 100644 index 000000000000..ed598af42843 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageTag(Model): + """An image caption, i.e. a brief description of what the image depicts. 
+ + :param name: The tag value + :type name: str + :param confidence: The level of confidence the service has in the caption + :type confidence: float + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: + super(ImageTag, self).__init__(**kwargs) + self.name = name + self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py index 6716583b37ab..2c475662850c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py @@ -27,7 +27,7 @@ class ImageType(Model): 'line_drawing_type': {'key': 'lineDrawingType', 'type': 'float'}, } - def __init__(self, clip_art_type=None, line_drawing_type=None): - super(ImageType, self).__init__() - self.clip_art_type = clip_art_type - self.line_drawing_type = line_drawing_type + def __init__(self, **kwargs): + super(ImageType, self).__init__(**kwargs) + self.clip_art_type = kwargs.get('clip_art_type', None) + self.line_drawing_type = kwargs.get('line_drawing_type', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py new file mode 100644 index 000000000000..ecfd8b6af808 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageType(Model): + """An object providing possible image types and matching confidence levels. + + :param clip_art_type: Confidence level that the image is a clip art. + :type clip_art_type: float + :param line_drawing_type: Confidence level that the image is a line + drawing. 
+ :type line_drawing_type: float + """ + + _attribute_map = { + 'clip_art_type': {'key': 'clipArtType', 'type': 'float'}, + 'line_drawing_type': {'key': 'lineDrawingType', 'type': 'float'}, + } + + def __init__(self, *, clip_art_type: float=None, line_drawing_type: float=None, **kwargs) -> None: + super(ImageType, self).__init__(**kwargs) + self.clip_art_type = clip_art_type + self.line_drawing_type = line_drawing_type diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py index 05f4dab7f611..25106793ad9c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py @@ -15,7 +15,9 @@ class ImageUrl(Model): """ImageUrl. - :param url: + All required parameters must be populated in order to send to Azure. + + :param url: Required. Publicly reachable URL of an image :type url: str """ @@ -27,6 +29,6 @@ class ImageUrl(Model): 'url': {'key': 'url', 'type': 'str'}, } - def __init__(self, url): - super(ImageUrl, self).__init__() - self.url = url + def __init__(self, **kwargs): + super(ImageUrl, self).__init__(**kwargs) + self.url = kwargs.get('url', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py new file mode 100644 index 000000000000..3e00709f804d --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageUrl(Model): + """ImageUrl. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. 
Publicly reachable URL of an image + :type url: str + """ + + _validation = { + 'url': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, url: str, **kwargs) -> None: + super(ImageUrl, self).__init__(**kwargs) + self.url = url diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py index f9f03ab03780..3c6df06a5c12 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py @@ -30,8 +30,8 @@ class Line(Model): 'words': {'key': 'words', 'type': '[Word]'}, } - def __init__(self, bounding_box=None, text=None, words=None): - super(Line, self).__init__() - self.bounding_box = bounding_box - self.text = text - self.words = words + def __init__(self, **kwargs): + super(Line, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) + self.words = kwargs.get('words', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py new file mode 100644 index 000000000000..eaa7b16fa07c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Line(Model): + """Line. 
+ + :param bounding_box: + :type bounding_box: list[int] + :param text: + :type text: str + :param words: + :type words: + list[~azure.cognitiveservices.vision.computervision.models.Word] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': '[int]'}, + 'text': {'key': 'text', 'type': 'str'}, + 'words': {'key': 'words', 'type': '[Word]'}, + } + + def __init__(self, *, bounding_box=None, text: str=None, words=None, **kwargs) -> None: + super(Line, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text + self.words = words diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py index ad6e2cc61c42..de784bdce7dc 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py @@ -31,6 +31,6 @@ class ListModelsResult(Model): 'models_property': {'key': 'models', 'type': '[ModelDescription]'}, } - def __init__(self): - super(ListModelsResult, self).__init__() + def __init__(self, **kwargs): + super(ListModelsResult, self).__init__(**kwargs) self.models_property = None diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py new file mode 100644 index 000000000000..9fff20826a83 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ListModelsResult(Model): + """Result of the List Domain Models operation. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar models_property: An array of supported models. 
+ :vartype models_property: + list[~azure.cognitiveservices.vision.computervision.models.ModelDescription] + """ + + _validation = { + 'models_property': {'readonly': True}, + } + + _attribute_map = { + 'models_property': {'key': 'models', 'type': '[ModelDescription]'}, + } + + def __init__(self, **kwargs) -> None: + super(ListModelsResult, self).__init__(**kwargs) + self.models_property = None diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py index 4ac5b2c75ddb..b12081359419 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py @@ -26,7 +26,7 @@ class ModelDescription(Model): 'categories': {'key': 'categories', 'type': '[str]'}, } - def __init__(self, name=None, categories=None): - super(ModelDescription, self).__init__() - self.name = name - self.categories = categories + def __init__(self, **kwargs): + super(ModelDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.categories = kwargs.get('categories', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py new file mode 100644 index 000000000000..e5fc81d86ff8 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ModelDescription(Model): + """An object describing supported model by name and categories. 
+ + :param name: + :type name: str + :param categories: + :type categories: list[str] + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'categories': {'key': 'categories', 'type': '[str]'}, + } + + def __init__(self, *, name: str=None, categories=None, **kwargs) -> None: + super(ModelDescription, self).__init__(**kwargs) + self.name = name + self.categories = categories diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py index 1a417d2ebc3b..72eef4e29c53 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py @@ -33,7 +33,7 @@ class OcrLine(Model): 'words': {'key': 'words', 'type': '[OcrWord]'}, } - def __init__(self, bounding_box=None, words=None): - super(OcrLine, self).__init__() - self.bounding_box = bounding_box - self.words = words + def __init__(self, **kwargs): + super(OcrLine, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.words = kwargs.get('words', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py new file mode 100644 index 000000000000..99d4636b7e81 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrLine(Model): + """An object describing a single recognized line of text. + + :param bounding_box: Bounding box of a recognized line. The four integers + represent the x-coordinate of the left edge, the y-coordinate of the top + edge, width, and height of the bounding box, in the coordinate system of + the input image, after it has been rotated around its center according to + the detected text angle (see textAngle property), with the origin at the + top-left corner, and the y-axis pointing down. + :type bounding_box: str + :param words: An array of objects, where each object represents a + recognized word. 
+ :type words: + list[~azure.cognitiveservices.vision.computervision.models.OcrWord] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'words': {'key': 'words', 'type': '[OcrWord]'}, + } + + def __init__(self, *, bounding_box: str=None, words=None, **kwargs) -> None: + super(OcrLine, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.words = words diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py index f6f75cf27b3c..ddbeda6431ed 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py @@ -33,7 +33,7 @@ class OcrRegion(Model): 'lines': {'key': 'lines', 'type': '[OcrLine]'}, } - def __init__(self, bounding_box=None, lines=None): - super(OcrRegion, self).__init__() - self.bounding_box = bounding_box - self.lines = lines + def __init__(self, **kwargs): + super(OcrRegion, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.lines = kwargs.get('lines', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py new file mode 100644 index 000000000000..6ae209a84e5c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrRegion(Model): + """A region consists of multiple lines (e.g. a column of text in a + multi-column document). + + :param bounding_box: Bounding box of a recognized region. The four + integers represent the x-coordinate of the left edge, the y-coordinate of + the top edge, width, and height of the bounding box, in the coordinate + system of the input image, after it has been rotated around its center + according to the detected text angle (see textAngle property), with the + origin at the top-left corner, and the y-axis pointing down. 
+ :type bounding_box: str + :param lines: + :type lines: + list[~azure.cognitiveservices.vision.computervision.models.OcrLine] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'lines': {'key': 'lines', 'type': '[OcrLine]'}, + } + + def __init__(self, *, bounding_box: str=None, lines=None, **kwargs) -> None: + super(OcrRegion, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.lines = lines diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py index a3f5956f360b..eb573d1338e7 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py @@ -47,9 +47,9 @@ class OcrResult(Model): 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, } - def __init__(self, language=None, text_angle=None, orientation=None, regions=None): - super(OcrResult, self).__init__() - self.language = language - self.text_angle = text_angle - self.orientation = orientation - self.regions = regions + def __init__(self, **kwargs): + super(OcrResult, self).__init__(**kwargs) + self.language = kwargs.get('language', None) + self.text_angle = kwargs.get('text_angle', None) + self.orientation = kwargs.get('orientation', None) + self.regions = kwargs.get('regions', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py new file mode 100644 index 000000000000..413fb9637d4e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrResult(Model): + """OcrResult. + + :param language: + :type language: + ~azure.cognitiveservices.vision.computervision.models.OcrResult + :param text_angle: The angle, in degrees, of the detected text with + respect to the closest horizontal or vertical direction. After rotating + the input image clockwise by this angle, the recognized text lines become + horizontal or vertical. In combination with the orientation property it + can be used to overlay recognition results correctly on the original + image, by rotating either the original image or recognition results by a + suitable angle around the center of the original image. If the angle + cannot be confidently detected, this property is not present. If the image + contains text at different angles, only part of the text will be + recognized correctly. + :type text_angle: float + :param orientation: Orientation of the text recognized in the image. 
The + value (up,down,left, or right) refers to the direction that the top of the + recognized text is facing, after the image has been rotated around its + center according to the detected text angle (see textAngle property). + :type orientation: str + :param regions: An array of objects, where each object represents a region + of recognized text. + :type regions: + list[~azure.cognitiveservices.vision.computervision.models.OcrRegion] + """ + + _attribute_map = { + 'language': {'key': 'language', 'type': 'OcrResult'}, + 'text_angle': {'key': 'textAngle', 'type': 'float'}, + 'orientation': {'key': 'orientation', 'type': 'str'}, + 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, + } + + def __init__(self, *, language=None, text_angle: float=None, orientation: str=None, regions=None, **kwargs) -> None: + super(OcrResult, self).__init__(**kwargs) + self.language = language + self.text_angle = text_angle + self.orientation = orientation + self.regions = regions diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py index d5cf1f2e5679..c0ff18701bff 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py @@ -31,7 +31,7 @@ class OcrWord(Model): 'text': {'key': 'text', 'type': 'str'}, } - def __init__(self, bounding_box=None, text=None): - super(OcrWord, self).__init__() - self.bounding_box = bounding_box - self.text = text + def __init__(self, **kwargs): + super(OcrWord, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py new file mode 100644 index 000000000000..3e7705087b49 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrWord(Model): + """Information on a recognized word. + + :param bounding_box: Bounding box of a recognized word. The four integers + represent the x-coordinate of the left edge, the y-coordinate of the top + edge, width, and height of the bounding box, in the coordinate system of + the input image, after it has been rotated around its center according to + the detected text angle (see textAngle property), with the origin at the + top-left corner, and the y-axis pointing down. + :type bounding_box: str + :param text: String value of a recognized word. 
+ :type text: str + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + } + + def __init__(self, *, bounding_box: str=None, text: str=None, **kwargs) -> None: + super(OcrWord, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py index 791ac7db95e6..628dde0dae9a 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py @@ -24,6 +24,6 @@ class RecognitionResult(Model): 'lines': {'key': 'lines', 'type': '[Line]'}, } - def __init__(self, lines=None): - super(RecognitionResult, self).__init__() - self.lines = lines + def __init__(self, **kwargs): + super(RecognitionResult, self).__init__(**kwargs) + self.lines = kwargs.get('lines', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py new file mode 100644 index 000000000000..c809646eac7b --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecognitionResult(Model): + """RecognitionResult. + + :param lines: + :type lines: + list[~azure.cognitiveservices.vision.computervision.models.Line] + """ + + _attribute_map = { + 'lines': {'key': 'lines', 'type': '[Line]'}, + } + + def __init__(self, *, lines=None, **kwargs) -> None: + super(RecognitionResult, self).__init__(**kwargs) + self.lines = lines diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py index 4b93359d42cc..70ed25e51294 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py @@ -21,7 +21,7 @@ class TagResult(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageTag] :param request_id: Id of the REST API request. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -32,8 +32,8 @@ class TagResult(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, request_id=None, metadata=None): - super(TagResult, self).__init__() - self.tags = tags - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(TagResult, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py new file mode 100644 index 000000000000..5957e130893c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TagResult(Model): + """The results of a image tag operation, including any tags and image + metadata. + + :param tags: A list of tags with confidence level. + :type tags: + list[~azure.cognitiveservices.vision.computervision.models.ImageTag] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '[ImageTag]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(TagResult, self).__init__(**kwargs) + self.tags = tags + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py index 02fd499ea644..301f07ab62dd 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py @@ -29,7 +29,7 @@ class TextOperationResult(Model): 'recognition_result': {'key': 'recognitionResult', 'type': 'RecognitionResult'}, } - def __init__(self, status=None, recognition_result=None): - super(TextOperationResult, self).__init__() - self.status = status - self.recognition_result = recognition_result + def __init__(self, **kwargs): + super(TextOperationResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.recognition_result = kwargs.get('recognition_result', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py new file mode 100644 index 000000000000..cd6adfb3d754 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TextOperationResult(Model): + """TextOperationResult. + + :param status: Status of the text operation. 
Possible values include: 'Not + Started', 'Running', 'Failed', 'Succeeded' + :type status: str or + ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes + :param recognition_result: + :type recognition_result: + ~azure.cognitiveservices.vision.computervision.models.RecognitionResult + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, + 'recognition_result': {'key': 'recognitionResult', 'type': 'RecognitionResult'}, + } + + def __init__(self, *, status=None, recognition_result=None, **kwargs) -> None: + super(TextOperationResult, self).__init__(**kwargs) + self.status = status + self.recognition_result = recognition_result diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py index e5356b053373..af6015b9ed0d 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py @@ -26,7 +26,7 @@ class Word(Model): 'text': {'key': 'text', 'type': 'str'}, } - def __init__(self, bounding_box=None, text=None): - super(Word, self).__init__() - self.bounding_box = bounding_box - self.text = text + def __init__(self, **kwargs): + super(Word, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py new file mode 100644 index 000000000000..ea3dc0845b3e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Word(Model): + """Word. + + :param bounding_box: + :type bounding_box: list[int] + :param text: + :type text: str + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': '[int]'}, + 'text': {'key': 'text', 'type': 'str'}, + } + + def __init__(self, *, bounding_box=None, text: str=None, **kwargs) -> None: + super(Word, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py index e0ec669828cb..63d89bfb54fa 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "0.1.0" +VERSION = "1.0"
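
The practical effect of the model regeneration above is that every model is now built from keyword arguments: the existing modules switch to **kwargs-based __init__ methods, while the new *_py3.py variants add keyword-only, type-annotated constructors. Below is a minimal usage sketch, not part of the diff itself; it assumes the package's models namespace keeps re-exporting these classes (that wiring is outside this change) and uses a placeholder image URL.

    from azure.cognitiveservices.vision.computervision.models import (
        FaceDescription,
        FaceRectangle,
        ImageUrl,
    )

    # ImageUrl.url is documented as required; passing it by keyword works with
    # both the **kwargs-based (Python 2) and keyword-only (Python 3)
    # constructors introduced in this change.
    image = ImageUrl(url="https://example.com/sample.jpg")  # placeholder URL

    # Optional model fields simply default to None when omitted.
    rectangle = FaceRectangle(left=10, top=20, width=120, height=120)
    face = FaceDescription(age=31, gender="Male", face_rectangle=rectangle)

    print(image.url)
    print(face.face_rectangle.width)

A positional call such as FaceRectangle(10, 20, 120, 120) now raises TypeError under either constructor, which is why the keyword form is used throughout the sketch.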