From b957717a1839a5de6789899c360673b8f618f63f Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Wed, 23 May 2018 10:34:24 -0700 Subject: [PATCH 1/7] Generated from 984c12a568b2dd36acb521eb3b1c00fbd5b42995 (#2575) Update CognitiveService ComputerVision Swagger for new extensions (#1) * Merged PR 17875: Add new languages Add new languages Also tidy up the enum definition so that AutoRest generates C# that actually compiles * Merged PR 17942: Replace fancy quotes with plain single quotes Make javac compile without complaint even without setting the codepage to utf-8 by dropping the gratuitous typographic quotes. * Merged PR 17956: Rename url argument for overloaded endpoint to imageUrl This name will become the name of the argument in Python. Since you can used named arguments in Python, give them less ambiguous names. * Changes to make --azure-validator run more cleanly Errors Fixed * XmsExamplesRequired/D5001/Documentation (7) * XmsPathsMustOverloadPaths/R2058/SDKViolation (7) Errors Ignored * SecurityDefinitionsStructure/R2054/SDKViolation (1) - API Key scheme for Cognitive Services * APIVersionPattern/R3012/ARMViolation (1) - Keeping with the Cognitive Services numbering scheme * OperationsAPIImplementation/R3023/ARMViolation (1) - Not sure what this is Warnings Fixed * XmsEnumValidation/R2018/SDKViolation (1) * DescriptionAndTitleMissing/R4000/SDKViolation (1) Warnings Ignored * DescriptionAndTitleMissing/R4000/SDKViolation (35) - description is contained in #ref * EnumInsteadOfBoolean/R3018/ARMViolation (5) - fix would be a breaking change * ListInOperationName/R1003/SDKViolation (1) - fix would be a breaking change * LongRunningOperationsWithLongRunningExtension/R2007/SDKViolation (2) - endpoint will not ultimately yield a 200, as this extension would require * NonApplicationJsonType/R2004/ARMViolation (15) - fix would be breaking change * PageableOperation/R2029/SDKViolation (1) - not actually pageable * 
ParameterNotDefinedInGlobalParameters/R2015/SDKViolation (1) - All our own parameters specify x-ms-parameter-location * PostOperationIdContainsUrlVerb/R2066/SDKViolation (7) - fix would be breaking change --- .../computervision/computer_vision_api.py | 147 +++++++++++------- .../vision/computervision/models/__init__.py | 90 +++++++---- .../computervision/models/adult_info.py | 12 +- .../computervision/models/adult_info_py3.py | 44 ++++++ .../vision/computervision/models/category.py | 12 +- .../computervision/models/category_detail.py | 6 +- .../models/category_detail_py3.py | 29 ++++ .../computervision/models/category_py3.py | 37 +++++ .../models/celebrities_model.py | 10 +- .../models/celebrities_model_py3.py | 37 +++++ .../computervision/models/color_info.py | 14 +- .../computervision/models/color_info_py3.py | 44 ++++++ .../models/computer_vision_api_enums.py | 26 ++-- .../models/computer_vision_error.py | 25 +-- .../models/computer_vision_error_py3.py | 62 ++++++++ .../models/domain_model_results.py | 12 +- .../models/domain_model_results_py3.py | 40 +++++ .../computervision/models/face_description.py | 14 +- .../models/face_description_py3.py | 39 +++++ .../computervision/models/face_rectangle.py | 12 +- .../models/face_rectangle_py3.py | 40 +++++ .../computervision/models/image_analysis.py | 34 ++-- .../models/image_analysis_py3.py | 68 ++++++++ .../computervision/models/image_caption.py | 8 +- .../models/image_caption_py3.py | 32 ++++ .../models/image_description.py | 14 +- .../models/image_description_details.py | 14 +- .../models/image_description_details_py3.py | 43 +++++ .../models/image_description_py3.py | 43 +++++ .../computervision/models/image_metadata.py | 10 +- .../models/image_metadata_py3.py | 36 +++++ .../vision/computervision/models/image_tag.py | 8 +- .../computervision/models/image_tag_py3.py | 32 ++++ .../computervision/models/image_type.py | 8 +- .../computervision/models/image_type_py3.py | 33 ++++ 
.../vision/computervision/models/image_url.py | 10 +- .../computervision/models/image_url_py3.py | 34 ++++ .../vision/computervision/models/line.py | 10 +- .../vision/computervision/models/line_py3.py | 37 +++++ .../models/list_models_result.py | 4 +- .../models/list_models_result_py3.py | 36 +++++ .../models/model_description.py | 8 +- .../models/model_description_py3.py | 32 ++++ .../vision/computervision/models/ocr_line.py | 8 +- .../computervision/models/ocr_line_py3.py | 39 +++++ .../computervision/models/ocr_region.py | 8 +- .../computervision/models/ocr_region_py3.py | 39 +++++ .../computervision/models/ocr_result.py | 12 +- .../computervision/models/ocr_result_py3.py | 55 +++++++ .../vision/computervision/models/ocr_word.py | 8 +- .../computervision/models/ocr_word_py3.py | 37 +++++ .../models/recognition_result.py | 6 +- .../models/recognition_result_py3.py | 29 ++++ .../computervision/models/tag_result.py | 12 +- .../computervision/models/tag_result_py3.py | 39 +++++ .../models/text_operation_result.py | 8 +- .../models/text_operation_result_py3.py | 35 +++++ .../vision/computervision/models/word.py | 8 +- .../vision/computervision/models/word_py3.py | 32 ++++ .../vision/computervision/version.py | 2 +- 60 files changed, 1429 insertions(+), 254 deletions(-) create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py create mode 100644 
azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py create mode 100644 
azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py index 50cd56e3c61c..db22d1d994e0 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py @@ -9,7 +9,7 @@ # regenerated. 
# -------------------------------------------------------------------------- -from msrest.service_client import ServiceClient +from msrest.service_client import SDKClient from msrest import Configuration, Serializer, Deserializer from .version import VERSION from msrest.pipeline import ClientRawResponse @@ -51,7 +51,7 @@ def __init__( self.credentials = credentials -class ComputerVisionAPI(object): +class ComputerVisionAPI(SDKClient): """The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively. :ivar config: Configuration for client. @@ -73,7 +73,7 @@ def __init__( self, azure_region, credentials): self.config = ComputerVisionAPIConfiguration(azure_region, credentials) - self._client = ServiceClient(self.config.credentials, self.config) + super(ComputerVisionAPI, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self.api_version = '1.0' @@ -103,7 +103,7 @@ def list_models( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/models' + url = self.list_models.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -135,6 +135,7 @@ def list_models( return client_raw_response return deserialized + list_models.metadata = {'url': '/models'} def analyze_image( self, url, visual_features=None, details=None, language="en", custom_headers=None, raw=False, **operation_config): @@ -144,7 +145,7 @@ def analyze_image( optional 
parameter to allow you to choose which features to return. By default, image categories are returned in the response. - :param url: + :param url: Publicly reachable URL of an image :type url: str :param visual_features: A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual @@ -166,13 +167,12 @@ def analyze_image( in the image. :type details: list[str or ~azure.cognitiveservices.vision.computervision.models.Details] - :param language: A string indicating which language to return. The - service will return recognition results in specified language. If this + :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.zh - - Simplified Chinese. Possible values include: 'en', 'zh' - :type language: str or - ~azure.cognitiveservices.vision.computervision.models.Language1 + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -188,7 +188,7 @@ def analyze_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/analyze' + url = self.analyze_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -201,7 +201,7 @@ def analyze_image( if details is not None: query_parameters['details'] = self._serialize.query("details", details, '[Details]', div=',') if language is not None: - query_parameters['language'] = self._serialize.query("language", language, 'Language1') + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -230,6 +230,7 @@ def analyze_image( return client_raw_response return deserialized + analyze_image.metadata = {'url': '/analyze'} def generate_thumbnail( self, width, height, url, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config): @@ -248,7 +249,7 @@ def generate_thumbnail( :param height: Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. :type height: int - :param url: + :param url: Publicly reachable URL of an image :type url: str :param smart_cropping: Boolean flag for enabling smart cropping. 
:type smart_cropping: bool @@ -270,7 +271,7 @@ def generate_thumbnail( image_url = models.ImageUrl(url=url) # Construct URL - url = '/generateThumbnail' + url = self.generate_thumbnail.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -310,6 +311,7 @@ def generate_thumbnail( return client_raw_response return deserialized + generate_thumbnail.metadata = {'url': '/generateThumbnail'} def recognize_printed_text( self, url, detect_orientation=True, language="unk", custom_headers=None, raw=False, **operation_config): @@ -326,7 +328,7 @@ def recognize_printed_text( image orientation and correct it before further processing (e.g. if it's upside-down). :type detect_orientation: bool - :param url: + :param url: Publicly reachable URL of an image :type url: str :param language: The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: @@ -350,7 +352,7 @@ def recognize_printed_text( image_url = models.ImageUrl(url=url) # Construct URL - url = '/ocr' + url = self.recognize_printed_text.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -389,9 +391,10 @@ def recognize_printed_text( return client_raw_response return deserialized + recognize_printed_text.metadata = {'url': '/ocr'} def describe_image( - self, url, max_candidates="1", custom_headers=None, raw=False, **operation_config): + self, url, max_candidates="1", language="en", custom_headers=None, raw=False, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. @@ -402,11 +405,17 @@ def describe_image( returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong. - :param url: + :param url: Publicly reachable URL of an image :type url: str :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -422,7 +431,7 @@ def describe_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/describe' + url = self.describe_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -432,6 +441,8 @@ def describe_image( query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str') + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -460,20 +471,27 @@ def describe_image( return client_raw_response return deserialized + describe_image.metadata = {'url': '/describe'} def tag_image( - self, url, custom_headers=None, raw=False, **operation_config): + self, url, language="en", custom_headers=None, raw=False, **operation_config): """This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for - example the tag “cello” may be accompanied by the hint “musical - instrument”. All tags are in English. + example the tag 'cello' may be accompanied by the hint 'musical + instrument'. All tags are in English. - :param url: + :param url: Publicly reachable URL of an image :type url: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -489,7 +507,7 @@ def tag_image( image_url = models.ImageUrl(url=url) # Construct URL - url = '/tag' + url = self.tag_image.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -497,6 +515,8 @@ def tag_image( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -525,6 +545,7 @@ def tag_image( return client_raw_response return deserialized + tag_image.metadata = {'url': '/tag'} def analyze_image_by_domain( self, model, url, custom_headers=None, raw=False, **operation_config): @@ -541,7 +562,7 @@ def analyze_image_by_domain( values include: 'Celebrities', 'Landmarks' :type model: str or ~azure.cognitiveservices.vision.computervision.models.DomainModels - :param url: + :param url: Publicly reachable URL of an image :type url: str :param dict custom_headers: headers that will be added to the 
request :param bool raw: returns the direct response alongside the @@ -558,7 +579,7 @@ def analyze_image_by_domain( image_url = models.ImageUrl(url=url) # Construct URL - url = '/models/{model}/analyze' + url = self.analyze_image_by_domain.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'model': self._serialize.url("model", model, 'DomainModels') @@ -595,18 +616,19 @@ def analyze_image_by_domain( return client_raw_response return deserialized + analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'} def recognize_text( self, url, detect_handwriting=False, custom_headers=None, raw=False, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, - the response contains a field called “Operation-Location”. The - “Operation-Location” field contains the URL that you must use for your + the response contains a field called 'Operation-Location'. The + 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. - :param url: + :param url: Publicly reachable URL of an image :type url: str - :param detect_handwriting: If “true” is specified, handwriting - recognition is performed. If this parameter is set to “false” or is + :param detect_handwriting: If 'true' is specified, handwriting + recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. 
:type detect_handwriting: bool :param dict custom_headers: headers that will be added to the request @@ -622,7 +644,7 @@ def recognize_text( image_url = models.ImageUrl(url=url) # Construct URL - url = '/recognizeText' + url = self.recognize_text.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -656,6 +678,7 @@ def recognize_text( 'Operation-Location': 'str', }) return client_raw_response + recognize_text.metadata = {'url': '/recognizeText'} def get_text_operation_result( self, operation_id, custom_headers=None, raw=False, **operation_config): @@ -679,7 +702,7 @@ def get_text_operation_result( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/textOperations/{operationId}' + url = self.get_text_operation_result.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'operationId': self._serialize.url("operation_id", operation_id, 'str') @@ -712,6 +735,7 @@ def get_text_operation_result( return client_raw_response return deserialized + get_text_operation_result.metadata = {'url': '/textOperations/{operationId}'} def analyze_image_in_stream( self, image, visual_features=None, details=None, language="en", custom_headers=None, raw=False, callback=None, **operation_config): @@ -739,11 +763,11 @@ def analyze_image_in_stream( feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' :type details: str - :param language: A string indicating which language to return. The - service will return recognition results in specified language. If this + :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.zh - - Simplified Chinese. 
Possible values include: 'en', 'zh' + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. Possible values include: + 'en', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -763,7 +787,7 @@ def analyze_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/analyze' + url = self.analyze_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -805,6 +829,7 @@ def analyze_image_in_stream( return client_raw_response return deserialized + analyze_image_in_stream.metadata = {'url': '/analyze'} def generate_thumbnail_in_stream( self, width, height, image, smart_cropping=False, custom_headers=None, raw=False, callback=None, **operation_config): @@ -840,10 +865,10 @@ def generate_thumbnail_in_stream( :return: object or ClientRawResponse if raw=true :rtype: Generator or ~msrest.pipeline.ClientRawResponse :raises: - :class:`ComputerVisionErrorException` + :class:`HttpOperationError` """ # Construct URL - url = '/generateThumbnail' + url = self.generate_thumbnail_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -871,7 +896,7 @@ def generate_thumbnail_in_stream( request, header_parameters, body_content, stream=True, **operation_config) if response.status_code not in [200]: - raise models.ComputerVisionErrorException(self._deserialize, response) + raise HttpOperationError(self._deserialize, response) deserialized = None @@ -883,6 +908,7 @@ def generate_thumbnail_in_stream( return client_raw_response return deserialized + generate_thumbnail_in_stream.metadata = {'url': '/generateThumbnail'} def recognize_printed_text_in_stream( 
self, image, detect_orientation=True, language="unk", custom_headers=None, raw=False, callback=None, **operation_config): @@ -926,7 +952,7 @@ def recognize_printed_text_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/ocr' + url = self.recognize_printed_text_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -965,9 +991,10 @@ def recognize_printed_text_in_stream( return client_raw_response return deserialized + recognize_printed_text_in_stream.metadata = {'url': '/ocr'} def describe_image_in_stream( - self, image, max_candidates="1", custom_headers=None, raw=False, callback=None, **operation_config): + self, image, max_candidates="1", language="en", custom_headers=None, raw=False, callback=None, **operation_config): """This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. @@ -983,6 +1010,12 @@ def describe_image_in_stream( :param max_candidates: Maximum number of candidate descriptions to be returned. The default is 1. :type max_candidates: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1001,7 +1034,7 @@ def describe_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/describe' + url = self.describe_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1011,6 +1044,8 @@ def describe_image_in_stream( query_parameters = {} if max_candidates is not None: query_parameters['maxCandidates'] = self._serialize.query("max_candidates", max_candidates, 'str') + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -1039,6 +1074,7 @@ def describe_image_in_stream( return client_raw_response return deserialized + describe_image_in_stream.metadata = {'url': '/describe'} def tag_image_in_stream( self, image, custom_headers=None, raw=False, callback=None, **operation_config): @@ -1048,8 +1084,8 @@ def tag_image_in_stream( images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for - example the tag “cello” may be accompanied by the hint “musical - instrument”. All tags are in English. + example the tag 'cello' may be accompanied by the hint 'musical + instrument'. All tags are in English. :param image: An image stream. 
:type image: Generator @@ -1071,7 +1107,7 @@ def tag_image_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/tag' + url = self.tag_image_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1107,6 +1143,7 @@ def tag_image_in_stream( return client_raw_response return deserialized + tag_image_in_stream.metadata = {'url': '/tag'} def analyze_image_by_domain_in_stream( self, model, image, custom_headers=None, raw=False, callback=None, **operation_config): @@ -1141,7 +1178,7 @@ def analyze_image_by_domain_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/models/{model}/analyze' + url = self.analyze_image_by_domain_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), 'model': self._serialize.url("model", model, 'str') @@ -1178,18 +1215,19 @@ def analyze_image_by_domain_in_stream( return client_raw_response return deserialized + analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'} def recognize_text_in_stream( self, image, detect_handwriting=False, custom_headers=None, raw=False, callback=None, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, - the response contains a field called “Operation-Location”. The - “Operation-Location” field contains the URL that you must use for your + the response contains a field called 'Operation-Location'. The + 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. :param image: An image stream. :type image: Generator - :param detect_handwriting: If “true” is specified, handwriting - recognition is performed. 
If this parameter is set to “false” or is + :param detect_handwriting: If 'true' is specified, handwriting + recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. :type detect_handwriting: bool :param dict custom_headers: headers that will be added to the request @@ -1208,7 +1246,7 @@ def recognize_text_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = '/recognizeText' + url = self.recognize_text_in_stream.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True) } @@ -1242,3 +1280,4 @@ def recognize_text_in_stream( 'Operation-Location': 'str', }) return client_raw_response + recognize_text_in_stream.metadata = {'url': '/recognizeText'} diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py index 8436045029f8..663008ab4f8c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py @@ -9,42 +9,72 @@ # regenerated. 
# -------------------------------------------------------------------------- -from .word import Word -from .line import Line -from .recognition_result import RecognitionResult -from .text_operation_result import TextOperationResult -from .face_rectangle import FaceRectangle -from .celebrities_model import CelebritiesModel -from .category_detail import CategoryDetail -from .category import Category -from .adult_info import AdultInfo -from .color_info import ColorInfo -from .image_type import ImageType -from .image_tag import ImageTag -from .image_caption import ImageCaption -from .image_metadata import ImageMetadata -from .image_description_details import ImageDescriptionDetails -from .face_description import FaceDescription -from .image_analysis import ImageAnalysis -from .ocr_word import OcrWord -from .ocr_line import OcrLine -from .ocr_region import OcrRegion -from .ocr_result import OcrResult -from .model_description import ModelDescription -from .list_models_result import ListModelsResult -from .domain_model_results import DomainModelResults -from .image_description import ImageDescription -from .tag_result import TagResult -from .computer_vision_error import ComputerVisionError, ComputerVisionErrorException -from .image_url import ImageUrl +try: + from .word_py3 import Word + from .line_py3 import Line + from .recognition_result_py3 import RecognitionResult + from .text_operation_result_py3 import TextOperationResult + from .face_rectangle_py3 import FaceRectangle + from .celebrities_model_py3 import CelebritiesModel + from .category_detail_py3 import CategoryDetail + from .category_py3 import Category + from .adult_info_py3 import AdultInfo + from .color_info_py3 import ColorInfo + from .image_type_py3 import ImageType + from .image_tag_py3 import ImageTag + from .image_caption_py3 import ImageCaption + from .image_metadata_py3 import ImageMetadata + from .image_description_details_py3 import ImageDescriptionDetails + from .face_description_py3 import 
FaceDescription + from .image_analysis_py3 import ImageAnalysis + from .ocr_word_py3 import OcrWord + from .ocr_line_py3 import OcrLine + from .ocr_region_py3 import OcrRegion + from .ocr_result_py3 import OcrResult + from .model_description_py3 import ModelDescription + from .list_models_result_py3 import ListModelsResult + from .domain_model_results_py3 import DomainModelResults + from .image_description_py3 import ImageDescription + from .tag_result_py3 import TagResult + from .computer_vision_error_py3 import ComputerVisionError, ComputerVisionErrorException + from .image_url_py3 import ImageUrl +except (SyntaxError, ImportError): + from .word import Word + from .line import Line + from .recognition_result import RecognitionResult + from .text_operation_result import TextOperationResult + from .face_rectangle import FaceRectangle + from .celebrities_model import CelebritiesModel + from .category_detail import CategoryDetail + from .category import Category + from .adult_info import AdultInfo + from .color_info import ColorInfo + from .image_type import ImageType + from .image_tag import ImageTag + from .image_caption import ImageCaption + from .image_metadata import ImageMetadata + from .image_description_details import ImageDescriptionDetails + from .face_description import FaceDescription + from .image_analysis import ImageAnalysis + from .ocr_word import OcrWord + from .ocr_line import OcrLine + from .ocr_region import OcrRegion + from .ocr_result import OcrResult + from .model_description import ModelDescription + from .list_models_result import ListModelsResult + from .domain_model_results import DomainModelResults + from .image_description import ImageDescription + from .tag_result import TagResult + from .computer_vision_error import ComputerVisionError, ComputerVisionErrorException + from .image_url import ImageUrl from .computer_vision_api_enums import ( TextOperationStatusCodes, + Gender, ComputerVisionErrorCodes, VisualFeatureTypes, OcrLanguages, 
AzureRegions, Details, - Language1, DomainModels, ) @@ -78,11 +108,11 @@ 'ComputerVisionError', 'ComputerVisionErrorException', 'ImageUrl', 'TextOperationStatusCodes', + 'Gender', 'ComputerVisionErrorCodes', 'VisualFeatureTypes', 'OcrLanguages', 'AzureRegions', 'Details', - 'Language1', 'DomainModels', ] diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py index 37c70ec08dd2..aa674936a787 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info.py @@ -36,9 +36,9 @@ class AdultInfo(Model): 'racy_score': {'key': 'racyScore', 'type': 'float'}, } - def __init__(self, is_adult_content=None, is_racy_content=None, adult_score=None, racy_score=None): - super(AdultInfo, self).__init__() - self.is_adult_content = is_adult_content - self.is_racy_content = is_racy_content - self.adult_score = adult_score - self.racy_score = racy_score + def __init__(self, **kwargs): + super(AdultInfo, self).__init__(**kwargs) + self.is_adult_content = kwargs.get('is_adult_content', None) + self.is_racy_content = kwargs.get('is_racy_content', None) + self.adult_score = kwargs.get('adult_score', None) + self.racy_score = kwargs.get('racy_score', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py new file mode 100644 index 000000000000..9c28b8351a4a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/adult_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AdultInfo(Model): + """An object describing whether the image contains adult-oriented content + and/or is racy. + + :param is_adult_content: A value indicating if the image contains + adult-oriented content. + :type is_adult_content: bool + :param is_racy_content: A value indicating if the image is race. + :type is_racy_content: bool + :param adult_score: Score from 0 to 1 that indicates how much of adult + content is within the image. + :type adult_score: float + :param racy_score: Score from 0 to 1 that indicates how suggestive is the + image. 
+ :type racy_score: float + """ + + _attribute_map = { + 'is_adult_content': {'key': 'isAdultContent', 'type': 'bool'}, + 'is_racy_content': {'key': 'isRacyContent', 'type': 'bool'}, + 'adult_score': {'key': 'adultScore', 'type': 'float'}, + 'racy_score': {'key': 'racyScore', 'type': 'float'}, + } + + def __init__(self, *, is_adult_content: bool=None, is_racy_content: bool=None, adult_score: float=None, racy_score: float=None, **kwargs) -> None: + super(AdultInfo, self).__init__(**kwargs) + self.is_adult_content = is_adult_content + self.is_racy_content = is_racy_content + self.adult_score = adult_score + self.racy_score = racy_score diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py index 1a3e5cd8fe0d..234f283b0d5c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category.py @@ -19,7 +19,7 @@ class Category(Model): :type name: str :param score: Scoring of the category. :type score: float - :param detail: Additional category detail if available. 
+ :param detail: :type detail: ~azure.cognitiveservices.vision.computervision.models.CategoryDetail """ @@ -30,8 +30,8 @@ class Category(Model): 'detail': {'key': 'detail', 'type': 'CategoryDetail'}, } - def __init__(self, name=None, score=None, detail=None): - super(Category, self).__init__() - self.name = name - self.score = score - self.detail = detail + def __init__(self, **kwargs): + super(Category, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.score = kwargs.get('score', None) + self.detail = kwargs.get('detail', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py index 451ba60464be..eefbe5b691dc 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py @@ -24,6 +24,6 @@ class CategoryDetail(Model): 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, } - def __init__(self, celebrities=None): - super(CategoryDetail, self).__init__() - self.celebrities = celebrities + def __init__(self, **kwargs): + super(CategoryDetail, self).__init__(**kwargs) + self.celebrities = kwargs.get('celebrities', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py new file mode 100644 index 000000000000..b155197dab02 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CategoryDetail(Model): + """An object describing additional category details. + + :param celebrities: An array of celebrities if any identified. + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + """ + + _attribute_map = { + 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + } + + def __init__(self, *, celebrities=None, **kwargs) -> None: + super(CategoryDetail, self).__init__(**kwargs) + self.celebrities = celebrities diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py new file mode 100644 index 000000000000..e6c0c0485cc7 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Category(Model): + """An object describing identified category. + + :param name: Name of the category. + :type name: str + :param score: Scoring of the category. + :type score: float + :param detail: + :type detail: + ~azure.cognitiveservices.vision.computervision.models.CategoryDetail + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'score': {'key': 'score', 'type': 'float'}, + 'detail': {'key': 'detail', 'type': 'CategoryDetail'}, + } + + def __init__(self, *, name: str=None, score: float=None, detail=None, **kwargs) -> None: + super(Category, self).__init__(**kwargs) + self.name = name + self.score = score + self.detail = detail diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py index 4f1db214ffcb..bb6db51def40 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model.py @@ -30,8 +30,8 @@ class CelebritiesModel(Model): 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, } - def __init__(self, name=None, confidence=None, face_rectangle=None): - super(CelebritiesModel, self).__init__() - self.name = name - self.confidence = confidence - self.face_rectangle = face_rectangle + def __init__(self, **kwargs): + super(CelebritiesModel, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.confidence = kwargs.get('confidence', None) + self.face_rectangle = kwargs.get('face_rectangle', None) diff --git 
a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py new file mode 100644 index 000000000000..d50e338a979a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrities_model_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CelebritiesModel(Model): + """An object describing possible celebrity identification. + + :param name: Name of the celebrity. + :type name: str + :param confidence: Level of confidence ranging from 0 to 1. 
+ :type confidence: float + :param face_rectangle: + :type face_rectangle: + ~azure.cognitiveservices.vision.computervision.models.FaceRectangle + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + } + + def __init__(self, *, name: str=None, confidence: float=None, face_rectangle=None, **kwargs) -> None: + super(CelebritiesModel, self).__init__(**kwargs) + self.name = name + self.confidence = confidence + self.face_rectangle = face_rectangle diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py index 5425d3ec6315..60d583ee8340 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info.py @@ -35,10 +35,10 @@ class ColorInfo(Model): 'is_bw_img': {'key': 'isBWImg', 'type': 'bool'}, } - def __init__(self, dominant_color_foreground=None, dominant_color_background=None, dominant_colors=None, accent_color=None, is_bw_img=None): - super(ColorInfo, self).__init__() - self.dominant_color_foreground = dominant_color_foreground - self.dominant_color_background = dominant_color_background - self.dominant_colors = dominant_colors - self.accent_color = accent_color - self.is_bw_img = is_bw_img + def __init__(self, **kwargs): + super(ColorInfo, self).__init__(**kwargs) + self.dominant_color_foreground = kwargs.get('dominant_color_foreground', None) + self.dominant_color_background = kwargs.get('dominant_color_background', None) + self.dominant_colors = kwargs.get('dominant_colors', None) + self.accent_color = kwargs.get('accent_color', None) + self.is_bw_img = 
kwargs.get('is_bw_img', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py new file mode 100644 index 000000000000..c4320d353850 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/color_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ColorInfo(Model): + """An object providing additional metadata describing color attributes. + + :param dominant_color_foreground: Possible dominant foreground color. + :type dominant_color_foreground: str + :param dominant_color_background: Possible dominant background color. + :type dominant_color_background: str + :param dominant_colors: An array of possible dominant colors. + :type dominant_colors: list[str] + :param accent_color: Possible accent color. + :type accent_color: str + :param is_bw_img: A value indicating if the image is black and white. 
+ :type is_bw_img: bool + """ + + _attribute_map = { + 'dominant_color_foreground': {'key': 'dominantColorForeground', 'type': 'str'}, + 'dominant_color_background': {'key': 'dominantColorBackground', 'type': 'str'}, + 'dominant_colors': {'key': 'dominantColors', 'type': '[str]'}, + 'accent_color': {'key': 'accentColor', 'type': 'str'}, + 'is_bw_img': {'key': 'isBWImg', 'type': 'bool'}, + } + + def __init__(self, *, dominant_color_foreground: str=None, dominant_color_background: str=None, dominant_colors=None, accent_color: str=None, is_bw_img: bool=None, **kwargs) -> None: + super(ColorInfo, self).__init__(**kwargs) + self.dominant_color_foreground = dominant_color_foreground + self.dominant_color_background = dominant_color_background + self.dominant_colors = dominant_colors + self.accent_color = accent_color + self.is_bw_img = is_bw_img diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py index 81f543b7151b..e8ce61ec14bb 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py @@ -12,7 +12,7 @@ from enum import Enum -class TextOperationStatusCodes(Enum): +class TextOperationStatusCodes(str, Enum): not_started = "Not Started" running = "Running" @@ -20,7 +20,13 @@ class TextOperationStatusCodes(Enum): succeeded = "Succeeded" -class ComputerVisionErrorCodes(Enum): +class Gender(str, Enum): + + male = "Male" + female = "Female" + + +class ComputerVisionErrorCodes(str, Enum): invalid_image_url = "InvalidImageUrl" invalid_image_format = "InvalidImageFormat" @@ -37,7 +43,7 @@ class ComputerVisionErrorCodes(Enum): storage_exception = 
"StorageException" -class VisualFeatureTypes(Enum): +class VisualFeatureTypes(str, Enum): image_type = "ImageType" faces = "Faces" @@ -48,7 +54,7 @@ class VisualFeatureTypes(Enum): description = "Description" -class OcrLanguages(Enum): +class OcrLanguages(str, Enum): unk = "unk" zh_hans = "zh-Hans" @@ -79,7 +85,7 @@ class OcrLanguages(Enum): sk = "sk" -class AzureRegions(Enum): +class AzureRegions(str, Enum): westus = "westus" westeurope = "westeurope" @@ -95,19 +101,13 @@ class AzureRegions(Enum): brazilsouth = "brazilsouth" -class Details(Enum): +class Details(str, Enum): celebrities = "Celebrities" landmarks = "Landmarks" -class Language1(Enum): - - en = "en" - zh = "zh" - - -class DomainModels(Enum): +class DomainModels(str, Enum): celebrities = "Celebrities" landmarks = "Landmarks" diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py index 651725041655..4350fe0694ab 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error.py @@ -16,14 +16,17 @@ class ComputerVisionError(Model): """ComputerVisionError. - :param code: The error code. Possible values include: 'InvalidImageUrl', - 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', - 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', - 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', - 'Unspecified', 'StorageException' + All required parameters must be populated in order to send to Azure. + + :param code: Required. The error code. 
Possible values include: + 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', + 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', + 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', + 'InternalServerError', 'Unspecified', 'StorageException' :type code: str or ~azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorCodes - :param message: A message explaining the error reported by the service. + :param message: Required. A message explaining the error reported by the + service. :type message: str :param request_id: A unique request identifier. :type request_id: str @@ -40,11 +43,11 @@ class ComputerVisionError(Model): 'request_id': {'key': 'requestId', 'type': 'str'}, } - def __init__(self, code, message, request_id=None): - super(ComputerVisionError, self).__init__() - self.code = code - self.message = message - self.request_id = request_id + def __init__(self, **kwargs): + super(ComputerVisionError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.request_id = kwargs.get('request_id', None) class ComputerVisionErrorException(HttpOperationError): diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py new file mode 100644 index 000000000000..b4112ddba880 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_error_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class ComputerVisionError(Model): + """ComputerVisionError. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. The error code. Possible values include: + 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', + 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', + 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', + 'InternalServerError', 'Unspecified', 'StorageException' + :type code: str or + ~azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorCodes + :param message: Required. A message explaining the error reported by the + service. + :type message: str + :param request_id: A unique request identifier. + :type request_id: str + """ + + _validation = { + 'code': {'required': True}, + 'message': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'ComputerVisionErrorCodes'}, + 'message': {'key': 'message', 'type': 'str'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + } + + def __init__(self, *, code, message: str, request_id: str=None, **kwargs) -> None: + super(ComputerVisionError, self).__init__(**kwargs) + self.code = code + self.message = message + self.request_id = request_id + + +class ComputerVisionErrorException(HttpOperationError): + """Server responsed with exception of type: 'ComputerVisionError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. 
+ """ + + def __init__(self, deserialize, response, *args): + + super(ComputerVisionErrorException, self).__init__(deserialize, response, 'ComputerVisionError', *args) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py index bc241232c803..4093d33791b5 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py @@ -22,7 +22,7 @@ class DomainModelResults(Model): list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] :param request_id: Id of the REST API request. :type request_id: str - :param metadata: Additional image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -33,8 +33,8 @@ class DomainModelResults(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, celebrities=None, request_id=None, metadata=None): - super(DomainModelResults, self).__init__() - self.celebrities = celebrities - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(DomainModelResults, self).__init__(**kwargs) + self.celebrities = kwargs.get('celebrities', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py new file mode 100644 index 000000000000..ea639d2d2fc8 --- /dev/null +++ 
b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DomainModelResults(Model): + """Result of image analysis using a specific domain model including additional + metadata. + + :param celebrities: An array of possible celebritied identified in the + image. + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'celebrities': {'key': 'result.celebrities', 'type': '[CelebritiesModel]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, celebrities=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(DomainModelResults, self).__init__(**kwargs) + self.celebrities = celebrities + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py index bf8f36245e08..383a6ecd9576 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description.py @@ -20,7 +20,7 @@ class FaceDescription(Model): :param gender: Possible gender of the face. 
Possible values include: 'Male', 'Female' :type gender: str or - ~azure.cognitiveservices.vision.computervision.models.enum + ~azure.cognitiveservices.vision.computervision.models.Gender :param face_rectangle: :type face_rectangle: ~azure.cognitiveservices.vision.computervision.models.FaceRectangle @@ -28,12 +28,12 @@ class FaceDescription(Model): _attribute_map = { 'age': {'key': 'age', 'type': 'int'}, - 'gender': {'key': 'gender', 'type': 'str'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, } - def __init__(self, age=None, gender=None, face_rectangle=None): - super(FaceDescription, self).__init__() - self.age = age - self.gender = gender - self.face_rectangle = face_rectangle + def __init__(self, **kwargs): + super(FaceDescription, self).__init__(**kwargs) + self.age = kwargs.get('age', None) + self.gender = kwargs.get('gender', None) + self.face_rectangle = kwargs.get('face_rectangle', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py new file mode 100644 index 000000000000..63e9e941714c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_description_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FaceDescription(Model): + """An object describing a face identified in the image. + + :param age: Possible age of the face. + :type age: int + :param gender: Possible gender of the face. Possible values include: + 'Male', 'Female' + :type gender: str or + ~azure.cognitiveservices.vision.computervision.models.Gender + :param face_rectangle: + :type face_rectangle: + ~azure.cognitiveservices.vision.computervision.models.FaceRectangle + """ + + _attribute_map = { + 'age': {'key': 'age', 'type': 'int'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + } + + def __init__(self, *, age: int=None, gender=None, face_rectangle=None, **kwargs) -> None: + super(FaceDescription, self).__init__(**kwargs) + self.age = age + self.gender = gender + self.face_rectangle = face_rectangle diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py index 6edf8f06acbb..566ecc6100d4 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle.py @@ -32,9 +32,9 @@ class FaceRectangle(Model): 'height': {'key': 'height', 'type': 'int'}, } - def __init__(self, left=None, top=None, width=None, height=None): - super(FaceRectangle, self).__init__() - self.left = left - self.top = top - self.width = width - self.height = height + def __init__(self, **kwargs): + super(FaceRectangle, self).__init__(**kwargs) + self.left = kwargs.get('left', None) + self.top = kwargs.get('top', None) + self.width = kwargs.get('width', None) + 
self.height = kwargs.get('height', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py new file mode 100644 index 000000000000..aa8b0daaff59 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/face_rectangle_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FaceRectangle(Model): + """An object describing face rectangle. + + :param left: X-coordinate of the top left point of the face. + :type left: int + :param top: Y-coordinate of the top left point of the face. + :type top: int + :param width: Width measured from the top-left point of the face. + :type width: int + :param height: Height measured from the top-left point of the face. 
+ :type height: int + """ + + _attribute_map = { + 'left': {'key': 'left', 'type': 'int'}, + 'top': {'key': 'top', 'type': 'int'}, + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + } + + def __init__(self, *, left: int=None, top: int=None, width: int=None, height: int=None, **kwargs) -> None: + super(FaceRectangle, self).__init__(**kwargs) + self.left = left + self.top = top + self.width = width + self.height = height diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py index 5bf9320a7d26..5d7e3c308e43 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis.py @@ -18,21 +18,19 @@ class ImageAnalysis(Model): :param categories: An array indicating identified categories. :type categories: list[~azure.cognitiveservices.vision.computervision.models.Category] - :param adult: A property scoring on whether the image is adult-oriented - and/or racy. + :param adult: :type adult: ~azure.cognitiveservices.vision.computervision.models.AdultInfo - :param color: A property scoring on color spectrums. + :param color: :type color: ~azure.cognitiveservices.vision.computervision.models.ColorInfo - :param image_type: A property indicating type of image (whether it's - clipart or line drawing) + :param image_type: :type image_type: ~azure.cognitiveservices.vision.computervision.models.ImageType :param tags: A list of tags with confidence level. :type tags: list[~azure.cognitiveservices.vision.computervision.models.ImageTag] - :param description: Description of the image. 
+ :param description: :type description: ~azure.cognitiveservices.vision.computervision.models.ImageDescriptionDetails :param faces: An array of possible faces within the image. @@ -40,7 +38,7 @@ class ImageAnalysis(Model): list[~azure.cognitiveservices.vision.computervision.models.FaceDescription] :param request_id: Id of the request for tracking purposes. :type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -57,14 +55,14 @@ class ImageAnalysis(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, categories=None, adult=None, color=None, image_type=None, tags=None, description=None, faces=None, request_id=None, metadata=None): - super(ImageAnalysis, self).__init__() - self.categories = categories - self.adult = adult - self.color = color - self.image_type = image_type - self.tags = tags - self.description = description - self.faces = faces - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageAnalysis, self).__init__(**kwargs) + self.categories = kwargs.get('categories', None) + self.adult = kwargs.get('adult', None) + self.color = kwargs.get('color', None) + self.image_type = kwargs.get('image_type', None) + self.tags = kwargs.get('tags', None) + self.description = kwargs.get('description', None) + self.faces = kwargs.get('faces', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py new file mode 100644 index 000000000000..45eaf49c71d8 --- /dev/null +++ 
b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_analysis_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageAnalysis(Model): + """Result of AnalyzeImage operation. + + :param categories: An array indicating identified categories. + :type categories: + list[~azure.cognitiveservices.vision.computervision.models.Category] + :param adult: + :type adult: + ~azure.cognitiveservices.vision.computervision.models.AdultInfo + :param color: + :type color: + ~azure.cognitiveservices.vision.computervision.models.ColorInfo + :param image_type: + :type image_type: + ~azure.cognitiveservices.vision.computervision.models.ImageType + :param tags: A list of tags with confidence level. + :type tags: + list[~azure.cognitiveservices.vision.computervision.models.ImageTag] + :param description: + :type description: + ~azure.cognitiveservices.vision.computervision.models.ImageDescriptionDetails + :param faces: An array of possible faces within the image. + :type faces: + list[~azure.cognitiveservices.vision.computervision.models.FaceDescription] + :param request_id: Id of the request for tracking purposes. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'categories': {'key': 'categories', 'type': '[Category]'}, + 'adult': {'key': 'adult', 'type': 'AdultInfo'}, + 'color': {'key': 'color', 'type': 'ColorInfo'}, + 'image_type': {'key': 'imageType', 'type': 'ImageType'}, + 'tags': {'key': 'tags', 'type': '[ImageTag]'}, + 'description': {'key': 'description', 'type': 'ImageDescriptionDetails'}, + 'faces': {'key': 'faces', 'type': '[FaceDescription]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, categories=None, adult=None, color=None, image_type=None, tags=None, description=None, faces=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageAnalysis, self).__init__(**kwargs) + self.categories = categories + self.adult = adult + self.color = color + self.image_type = image_type + self.tags = tags + self.description = description + self.faces = faces + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py index 448226f12d93..ec9aa4b93f80 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption.py @@ -26,7 +26,7 @@ class ImageCaption(Model): 'confidence': {'key': 'confidence', 'type': 'float'}, } - def __init__(self, text=None, confidence=None): - super(ImageCaption, self).__init__() - self.text = text - self.confidence = confidence + def __init__(self, **kwargs): + super(ImageCaption, self).__init__(**kwargs) + 
self.text = kwargs.get('text', None) + self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py new file mode 100644 index 000000000000..782f89cda23e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_caption_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageCaption(Model): + """An image caption, i.e. a brief description of what the image depicts. 
+ + :param text: The text of the caption + :type text: str + :param confidence: The level of confidence the service has in the caption + :type confidence: float + """ + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, text: str=None, confidence: float=None, **kwargs) -> None: + super(ImageCaption, self).__init__(**kwargs) + self.text = text + self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py index c9f0d374a8b3..6a84422b6b65 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py @@ -23,7 +23,7 @@ class ImageDescription(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] :param request_id: Id of the REST API request. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -35,9 +35,9 @@ class ImageDescription(Model): 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, captions=None, request_id=None, metadata=None): - super(ImageDescription, self).__init__() - self.tags = tags - self.captions = captions - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageDescription, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.captions = kwargs.get('captions', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py index f8ada2c66c82..1e6afbb99ed7 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py @@ -23,7 +23,7 @@ class ImageDescriptionDetails(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] :param request_id: Id of the REST API request. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -35,9 +35,9 @@ class ImageDescriptionDetails(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, captions=None, request_id=None, metadata=None): - super(ImageDescriptionDetails, self).__init__() - self.tags = tags - self.captions = captions - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(ImageDescriptionDetails, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.captions = kwargs.get('captions', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py new file mode 100644 index 000000000000..702d4ac029de --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageDescriptionDetails(Model): + """A collection of content tags, along with a list of captions sorted by + confidence level, and image metadata. 
+ + :param tags: A collection of image tags. + :type tags: list[str] + :param captions: A list of captions, sorted by confidence level. + :type captions: + list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '[str]'}, + 'captions': {'key': 'captions', 'type': '[ImageCaption]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageDescriptionDetails, self).__init__(**kwargs) + self.tags = tags + self.captions = captions + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py new file mode 100644 index 000000000000..3ec3fa9c951a --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageDescription(Model): + """A collection of content tags, along with a list of captions sorted by + confidence level, and image metadata. + + :param tags: A collection of image tags. + :type tags: list[str] + :param captions: A list of captions, sorted by confidence level. + :type captions: + list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'description.tags', 'type': '[str]'}, + 'captions': {'key': 'description.captions', 'type': '[ImageCaption]'}, + 'request_id': {'key': 'description.requestId', 'type': 'str'}, + 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(ImageDescription, self).__init__(**kwargs) + self.tags = tags + self.captions = captions + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py index baa2a46a8dd2..797206379282 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata.py @@ -29,8 +29,8 @@ class ImageMetadata(Model): 'format': {'key': 'format', 'type': 'str'}, } - def __init__(self, width=None, height=None, format=None): - super(ImageMetadata, self).__init__() - self.width = width - 
self.height = height - self.format = format + def __init__(self, **kwargs): + super(ImageMetadata, self).__init__(**kwargs) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + self.format = kwargs.get('format', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py new file mode 100644 index 000000000000..2d6bb256c481 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_metadata_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageMetadata(Model): + """Image metadata. 
+ + :param width: Image width + :type width: int + :param height: Image height + :type height: int + :param format: Image format + :type format: str + """ + + _attribute_map = { + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + 'format': {'key': 'format', 'type': 'str'}, + } + + def __init__(self, *, width: int=None, height: int=None, format: str=None, **kwargs) -> None: + super(ImageMetadata, self).__init__(**kwargs) + self.width = width + self.height = height + self.format = format diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py index 14524a29143e..93f349e9d4c0 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py @@ -26,7 +26,7 @@ class ImageTag(Model): 'confidence': {'key': 'confidence', 'type': 'float'}, } - def __init__(self, name=None, confidence=None): - super(ImageTag, self).__init__() - self.name = name - self.confidence = confidence + def __init__(self, **kwargs): + super(ImageTag, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py new file mode 100644 index 000000000000..ed598af42843 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageTag(Model): + """An image caption, i.e. a brief description of what the image depicts. + + :param name: The tag value + :type name: str + :param confidence: The level of confidence the service has in the caption + :type confidence: float + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: + super(ImageTag, self).__init__(**kwargs) + self.name = name + self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py index 6716583b37ab..2c475662850c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type.py @@ -27,7 +27,7 @@ class ImageType(Model): 'line_drawing_type': {'key': 'lineDrawingType', 'type': 'float'}, } - def __init__(self, clip_art_type=None, line_drawing_type=None): - super(ImageType, self).__init__() - self.clip_art_type = clip_art_type - self.line_drawing_type = line_drawing_type + def __init__(self, **kwargs): + super(ImageType, self).__init__(**kwargs) + self.clip_art_type = kwargs.get('clip_art_type', None) + self.line_drawing_type = kwargs.get('line_drawing_type', 
None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py new file mode 100644 index 000000000000..ecfd8b6af808 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_type_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageType(Model): + """An object providing possible image types and matching confidence levels. + + :param clip_art_type: Confidence level that the image is a clip art. + :type clip_art_type: float + :param line_drawing_type: Confidence level that the image is a line + drawing. 
+ :type line_drawing_type: float + """ + + _attribute_map = { + 'clip_art_type': {'key': 'clipArtType', 'type': 'float'}, + 'line_drawing_type': {'key': 'lineDrawingType', 'type': 'float'}, + } + + def __init__(self, *, clip_art_type: float=None, line_drawing_type: float=None, **kwargs) -> None: + super(ImageType, self).__init__(**kwargs) + self.clip_art_type = clip_art_type + self.line_drawing_type = line_drawing_type diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py index 05f4dab7f611..25106793ad9c 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url.py @@ -15,7 +15,9 @@ class ImageUrl(Model): """ImageUrl. - :param url: + All required parameters must be populated in order to send to Azure. + + :param url: Required. 
Publicly reachable URL of an image :type url: str """ @@ -27,6 +29,6 @@ class ImageUrl(Model): 'url': {'key': 'url', 'type': 'str'}, } - def __init__(self, url): - super(ImageUrl, self).__init__() - self.url = url + def __init__(self, **kwargs): + super(ImageUrl, self).__init__(**kwargs) + self.url = kwargs.get('url', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py new file mode 100644 index 000000000000..3e00709f804d --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_url_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageUrl(Model): + """ImageUrl. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. 
Publicly reachable URL of an image + :type url: str + """ + + _validation = { + 'url': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, url: str, **kwargs) -> None: + super(ImageUrl, self).__init__(**kwargs) + self.url = url diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py index f9f03ab03780..3c6df06a5c12 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line.py @@ -30,8 +30,8 @@ class Line(Model): 'words': {'key': 'words', 'type': '[Word]'}, } - def __init__(self, bounding_box=None, text=None, words=None): - super(Line, self).__init__() - self.bounding_box = bounding_box - self.text = text - self.words = words + def __init__(self, **kwargs): + super(Line, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) + self.words = kwargs.get('words', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py new file mode 100644 index 000000000000..eaa7b16fa07c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/line_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Line(Model): + """Line. + + :param bounding_box: + :type bounding_box: list[int] + :param text: + :type text: str + :param words: + :type words: + list[~azure.cognitiveservices.vision.computervision.models.Word] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': '[int]'}, + 'text': {'key': 'text', 'type': 'str'}, + 'words': {'key': 'words', 'type': '[Word]'}, + } + + def __init__(self, *, bounding_box=None, text: str=None, words=None, **kwargs) -> None: + super(Line, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text + self.words = words diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py index ad6e2cc61c42..de784bdce7dc 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result.py @@ -31,6 +31,6 @@ class ListModelsResult(Model): 'models_property': {'key': 'models', 'type': '[ModelDescription]'}, } - def __init__(self): - super(ListModelsResult, self).__init__() + def __init__(self, **kwargs): + super(ListModelsResult, self).__init__(**kwargs) self.models_property = None diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py new 
file mode 100644 index 000000000000..9fff20826a83 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/list_models_result_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ListModelsResult(Model): + """Result of the List Domain Models operation. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar models_property: An array of supported models. + :vartype models_property: + list[~azure.cognitiveservices.vision.computervision.models.ModelDescription] + """ + + _validation = { + 'models_property': {'readonly': True}, + } + + _attribute_map = { + 'models_property': {'key': 'models', 'type': '[ModelDescription]'}, + } + + def __init__(self, **kwargs) -> None: + super(ListModelsResult, self).__init__(**kwargs) + self.models_property = None diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py index 4ac5b2c75ddb..b12081359419 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description.py @@ -26,7 +26,7 @@ class ModelDescription(Model): 'categories': {'key': 
'categories', 'type': '[str]'}, } - def __init__(self, name=None, categories=None): - super(ModelDescription, self).__init__() - self.name = name - self.categories = categories + def __init__(self, **kwargs): + super(ModelDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.categories = kwargs.get('categories', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py new file mode 100644 index 000000000000..e5fc81d86ff8 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/model_description_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ModelDescription(Model): + """An object describing supported model by name and categories. 
+ + :param name: + :type name: str + :param categories: + :type categories: list[str] + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'categories': {'key': 'categories', 'type': '[str]'}, + } + + def __init__(self, *, name: str=None, categories=None, **kwargs) -> None: + super(ModelDescription, self).__init__(**kwargs) + self.name = name + self.categories = categories diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py index 1a417d2ebc3b..72eef4e29c53 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line.py @@ -33,7 +33,7 @@ class OcrLine(Model): 'words': {'key': 'words', 'type': '[OcrWord]'}, } - def __init__(self, bounding_box=None, words=None): - super(OcrLine, self).__init__() - self.bounding_box = bounding_box - self.words = words + def __init__(self, **kwargs): + super(OcrLine, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.words = kwargs.get('words', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py new file mode 100644 index 000000000000..99d4636b7e81 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_line_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrLine(Model): + """An object describing a single recognized line of text. + + :param bounding_box: Bounding box of a recognized line. The four integers + represent the x-coordinate of the left edge, the y-coordinate of the top + edge, width, and height of the bounding box, in the coordinate system of + the input image, after it has been rotated around its center according to + the detected text angle (see textAngle property), with the origin at the + top-left corner, and the y-axis pointing down. + :type bounding_box: str + :param words: An array of objects, where each object represents a + recognized word. + :type words: + list[~azure.cognitiveservices.vision.computervision.models.OcrWord] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'words': {'key': 'words', 'type': '[OcrWord]'}, + } + + def __init__(self, *, bounding_box: str=None, words=None, **kwargs) -> None: + super(OcrLine, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.words = words diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py index f6f75cf27b3c..ddbeda6431ed 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region.py @@ -33,7 +33,7 @@ class OcrRegion(Model): 'lines': {'key': 'lines', 'type': '[OcrLine]'}, } - def __init__(self, 
bounding_box=None, lines=None): - super(OcrRegion, self).__init__() - self.bounding_box = bounding_box - self.lines = lines + def __init__(self, **kwargs): + super(OcrRegion, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.lines = kwargs.get('lines', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py new file mode 100644 index 000000000000..6ae209a84e5c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_region_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrRegion(Model): + """A region consists of multiple lines (e.g. a column of text in a + multi-column document). + + :param bounding_box: Bounding box of a recognized region. The four + integers represent the x-coordinate of the left edge, the y-coordinate of + the top edge, width, and height of the bounding box, in the coordinate + system of the input image, after it has been rotated around its center + according to the detected text angle (see textAngle property), with the + origin at the top-left corner, and the y-axis pointing down. 
+ :type bounding_box: str + :param lines: + :type lines: + list[~azure.cognitiveservices.vision.computervision.models.OcrLine] + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'lines': {'key': 'lines', 'type': '[OcrLine]'}, + } + + def __init__(self, *, bounding_box: str=None, lines=None, **kwargs) -> None: + super(OcrRegion, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.lines = lines diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py index a3f5956f360b..eb573d1338e7 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py @@ -47,9 +47,9 @@ class OcrResult(Model): 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, } - def __init__(self, language=None, text_angle=None, orientation=None, regions=None): - super(OcrResult, self).__init__() - self.language = language - self.text_angle = text_angle - self.orientation = orientation - self.regions = regions + def __init__(self, **kwargs): + super(OcrResult, self).__init__(**kwargs) + self.language = kwargs.get('language', None) + self.text_angle = kwargs.get('text_angle', None) + self.orientation = kwargs.get('orientation', None) + self.regions = kwargs.get('regions', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py new file mode 100644 index 000000000000..413fb9637d4e --- /dev/null +++ 
b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrResult(Model): + """OcrResult. + + :param language: + :type language: + ~azure.cognitiveservices.vision.computervision.models.OcrResult + :param text_angle: The angle, in degrees, of the detected text with + respect to the closest horizontal or vertical direction. After rotating + the input image clockwise by this angle, the recognized text lines become + horizontal or vertical. In combination with the orientation property it + can be used to overlay recognition results correctly on the original + image, by rotating either the original image or recognition results by a + suitable angle around the center of the original image. If the angle + cannot be confidently detected, this property is not present. If the image + contains text at different angles, only part of the text will be + recognized correctly. + :type text_angle: float + :param orientation: Orientation of the text recognized in the image. The + value (up,down,left, or right) refers to the direction that the top of the + recognized text is facing, after the image has been rotated around its + center according to the detected text angle (see textAngle property). + :type orientation: str + :param regions: An array of objects, where each object represents a region + of recognized text. 
+ :type regions: + list[~azure.cognitiveservices.vision.computervision.models.OcrRegion] + """ + + _attribute_map = { + 'language': {'key': 'language', 'type': 'OcrResult'}, + 'text_angle': {'key': 'textAngle', 'type': 'float'}, + 'orientation': {'key': 'orientation', 'type': 'str'}, + 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, + } + + def __init__(self, *, language=None, text_angle: float=None, orientation: str=None, regions=None, **kwargs) -> None: + super(OcrResult, self).__init__(**kwargs) + self.language = language + self.text_angle = text_angle + self.orientation = orientation + self.regions = regions diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py index d5cf1f2e5679..c0ff18701bff 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word.py @@ -31,7 +31,7 @@ class OcrWord(Model): 'text': {'key': 'text', 'type': 'str'}, } - def __init__(self, bounding_box=None, text=None): - super(OcrWord, self).__init__() - self.bounding_box = bounding_box - self.text = text + def __init__(self, **kwargs): + super(OcrWord, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py new file mode 100644 index 000000000000..3e7705087b49 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_word_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OcrWord(Model): + """Information on a recognized word. + + :param bounding_box: Bounding box of a recognized word. The four integers + represent the x-coordinate of the left edge, the y-coordinate of the top + edge, width, and height of the bounding box, in the coordinate system of + the input image, after it has been rotated around its center according to + the detected text angle (see textAngle property), with the origin at the + top-left corner, and the y-axis pointing down. + :type bounding_box: str + :param text: String value of a recognized word. 
+ :type text: str + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, + 'text': {'key': 'text', 'type': 'str'}, + } + + def __init__(self, *, bounding_box: str=None, text: str=None, **kwargs) -> None: + super(OcrWord, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py index 791ac7db95e6..628dde0dae9a 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result.py @@ -24,6 +24,6 @@ class RecognitionResult(Model): 'lines': {'key': 'lines', 'type': '[Line]'}, } - def __init__(self, lines=None): - super(RecognitionResult, self).__init__() - self.lines = lines + def __init__(self, **kwargs): + super(RecognitionResult, self).__init__(**kwargs) + self.lines = kwargs.get('lines', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py new file mode 100644 index 000000000000..c809646eac7b --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/recognition_result_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecognitionResult(Model): + """RecognitionResult. + + :param lines: + :type lines: + list[~azure.cognitiveservices.vision.computervision.models.Line] + """ + + _attribute_map = { + 'lines': {'key': 'lines', 'type': '[Line]'}, + } + + def __init__(self, *, lines=None, **kwargs) -> None: + super(RecognitionResult, self).__init__(**kwargs) + self.lines = lines diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py index 4b93359d42cc..70ed25e51294 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result.py @@ -21,7 +21,7 @@ class TagResult(Model): list[~azure.cognitiveservices.vision.computervision.models.ImageTag] :param request_id: Id of the REST API request. 
:type request_id: str - :param metadata: Image metadata + :param metadata: :type metadata: ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ @@ -32,8 +32,8 @@ class TagResult(Model): 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, tags=None, request_id=None, metadata=None): - super(TagResult, self).__init__() - self.tags = tags - self.request_id = request_id - self.metadata = metadata + def __init__(self, **kwargs): + super(TagResult, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py new file mode 100644 index 000000000000..5957e130893c --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/tag_result_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TagResult(Model): + """The results of a image tag operation, including any tags and image + metadata. + + :param tags: A list of tags with confidence level. + :type tags: + list[~azure.cognitiveservices.vision.computervision.models.ImageTag] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '[ImageTag]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, tags=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(TagResult, self).__init__(**kwargs) + self.tags = tags + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py index 02fd499ea644..301f07ab62dd 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result.py @@ -29,7 +29,7 @@ class TextOperationResult(Model): 'recognition_result': {'key': 'recognitionResult', 'type': 'RecognitionResult'}, } - def __init__(self, status=None, recognition_result=None): - super(TextOperationResult, self).__init__() - self.status = status - self.recognition_result = recognition_result + def __init__(self, **kwargs): + super(TextOperationResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.recognition_result = kwargs.get('recognition_result', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py new file mode 100644 index 000000000000..cd6adfb3d754 --- /dev/null +++ 
b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/text_operation_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TextOperationResult(Model): + """TextOperationResult. + + :param status: Status of the text operation. Possible values include: 'Not + Started', 'Running', 'Failed', 'Succeeded' + :type status: str or + ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes + :param recognition_result: + :type recognition_result: + ~azure.cognitiveservices.vision.computervision.models.RecognitionResult + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, + 'recognition_result': {'key': 'recognitionResult', 'type': 'RecognitionResult'}, + } + + def __init__(self, *, status=None, recognition_result=None, **kwargs) -> None: + super(TextOperationResult, self).__init__(**kwargs) + self.status = status + self.recognition_result = recognition_result diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py index e5356b053373..af6015b9ed0d 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word.py @@ -26,7 +26,7 @@ 
class Word(Model): 'text': {'key': 'text', 'type': 'str'}, } - def __init__(self, bounding_box=None, text=None): - super(Word, self).__init__() - self.bounding_box = bounding_box - self.text = text + def __init__(self, **kwargs): + super(Word, self).__init__(**kwargs) + self.bounding_box = kwargs.get('bounding_box', None) + self.text = kwargs.get('text', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py new file mode 100644 index 000000000000..ea3dc0845b3e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/word_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Word(Model): + """Word. 
+ + :param bounding_box: + :type bounding_box: list[int] + :param text: + :type text: str + """ + + _attribute_map = { + 'bounding_box': {'key': 'boundingBox', 'type': '[int]'}, + 'text': {'key': 'text', 'type': 'str'}, + } + + def __init__(self, *, bounding_box=None, text: str=None, **kwargs) -> None: + super(Word, self).__init__(**kwargs) + self.bounding_box = bounding_box + self.text = text diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py index e0ec669828cb..63d89bfb54fa 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py @@ -9,5 +9,5 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "0.1.0" +VERSION = "1.0" From 62c4025b42abc6faea8f5411851c5aacdfee51fc Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Tue, 5 Jun 2018 10:23:40 -0700 Subject: [PATCH 2/7] Generated from bd6c66b615b94480a1458dde3a8bc42f3a689548 (#2645) ComputerVision - collection of fixes * fix ocr language parameter to fix [bug](https://github.com/Azure/azure-sdk-for-net/issues/4083) * language parameter missing for /tag path * language parameter missing for /models/*/analyze path * make /models/*/analyze return a more generic type * add specific types for /models/*/analyze to return for current models --- .../computervision/computer_vision_api.py | 38 +++++++++++++++---- .../vision/computervision/models/__init__.py | 11 +++++- .../models/celebrity_results.py | 38 +++++++++++++++++++ .../models/celebrity_results_py3.py | 38 +++++++++++++++++++ .../models/computer_vision_api_enums.py | 6 --- .../models/domain_model_results.py | 10 ++--- .../models/domain_model_results_py3.py | 12 +++--- 
.../computervision/models/landmark_results.py | 38 +++++++++++++++++++ .../models/landmark_results_landmarks_item.py | 32 ++++++++++++++++ .../landmark_results_landmarks_item_py3.py | 32 ++++++++++++++++ .../models/landmark_results_py3.py | 38 +++++++++++++++++++ .../computervision/models/ocr_result.py | 7 ++-- .../computervision/models/ocr_result_py3.py | 9 ++--- 13 files changed, 271 insertions(+), 38 deletions(-) create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py create mode 100644 azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py index db22d1d994e0..37c45a8a14e1 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py @@ -548,7 +548,7 @@ def tag_image( tag_image.metadata = {'url': '/tag'} def analyze_image_by_domain( - self, model, url, custom_headers=None, raw=False, **operation_config): + 
self, model, url, language="en", custom_headers=None, raw=False, **operation_config): """This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models @@ -558,12 +558,16 @@ def analyze_image_by_domain( returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. - :param model: The domain-specific content to recognize. Possible - values include: 'Celebrities', 'Landmarks' - :type model: str or - ~azure.cognitiveservices.vision.computervision.models.DomainModels + :param model: The domain-specific content to recognize. + :type model: str :param url: Publicly reachable URL of an image :type url: str + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -582,12 +586,14 @@ def analyze_image_by_domain( url = self.analyze_image_by_domain.metadata['url'] path_format_arguments = { 'AzureRegion': self._serialize.url("self.config.azure_region", self.config.azure_region, 'AzureRegions', skip_quote=True), - 'model': self._serialize.url("model", model, 'DomainModels') + 'model': self._serialize.url("model", model, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -1077,7 +1083,7 @@ def describe_image_in_stream( describe_image_in_stream.metadata = {'url': '/describe'} def tag_image_in_stream( - self, image, custom_headers=None, raw=False, callback=None, **operation_config): + self, image, language="en", custom_headers=None, raw=False, callback=None, **operation_config): """This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in @@ -1089,6 +1095,12 @@ def tag_image_in_stream( :param image: An image stream. :type image: Generator + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1115,6 +1127,8 @@ def tag_image_in_stream( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -1146,7 +1160,7 @@ def tag_image_in_stream( tag_image_in_stream.metadata = {'url': '/tag'} def analyze_image_by_domain_in_stream( - self, model, image, custom_headers=None, raw=False, callback=None, **operation_config): + self, model, image, language="en", custom_headers=None, raw=False, callback=None, **operation_config): """This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models @@ -1160,6 +1174,12 @@ def analyze_image_by_domain_in_stream( :type model: str :param image: An image stream. :type image: Generator + :param language: The desired language for output generation. If this + parameter is not specified, the default value is + "en".Supported languages:en - English, Default.ja - Japanese + pt - Portuguese zh - Simplified Chinese. 
Possible values include: + 'en', 'ja', 'pt', 'zh' + :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1187,6 +1207,8 @@ def analyze_image_by_domain_in_stream( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py index 663008ab4f8c..f0dde33bd544 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py @@ -34,6 +34,9 @@ from .model_description_py3 import ModelDescription from .list_models_result_py3 import ListModelsResult from .domain_model_results_py3 import DomainModelResults + from .celebrity_results_py3 import CelebrityResults + from .landmark_results_landmarks_item_py3 import LandmarkResultsLandmarksItem + from .landmark_results_py3 import LandmarkResults from .image_description_py3 import ImageDescription from .tag_result_py3 import TagResult from .computer_vision_error_py3 import ComputerVisionError, ComputerVisionErrorException @@ -63,6 +66,9 @@ from .model_description import ModelDescription from .list_models_result import ListModelsResult from .domain_model_results import DomainModelResults + from .celebrity_results import CelebrityResults + from .landmark_results_landmarks_item import LandmarkResultsLandmarksItem + from .landmark_results import LandmarkResults from .image_description import ImageDescription from .tag_result import TagResult from .computer_vision_error import 
ComputerVisionError, ComputerVisionErrorException @@ -75,7 +81,6 @@ OcrLanguages, AzureRegions, Details, - DomainModels, ) __all__ = [ @@ -103,6 +108,9 @@ 'ModelDescription', 'ListModelsResult', 'DomainModelResults', + 'CelebrityResults', + 'LandmarkResultsLandmarksItem', + 'LandmarkResults', 'ImageDescription', 'TagResult', 'ComputerVisionError', 'ComputerVisionErrorException', @@ -114,5 +122,4 @@ 'OcrLanguages', 'AzureRegions', 'Details', - 'DomainModels', ] diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results.py new file mode 100644 index 000000000000..84b07c34ecbc --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CelebrityResults(Model): + """List of celebrities recognized in the image. + + :param celebrities: + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, **kwargs): + super(CelebrityResults, self).__init__(**kwargs) + self.celebrities = kwargs.get('celebrities', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results_py3.py new file mode 100644 index 000000000000..b6edc077463e --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/celebrity_results_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CelebrityResults(Model): + """List of celebrities recognized in the image. + + :param celebrities: + :type celebrities: + list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, celebrities=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(CelebrityResults, self).__init__(**kwargs) + self.celebrities = celebrities + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py index e8ce61ec14bb..76ff22284087 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py @@ -105,9 +105,3 @@ class Details(str, Enum): celebrities = "Celebrities" landmarks = "Landmarks" - - -class DomainModels(str, Enum): - - celebrities = "Celebrities" - landmarks = "Landmarks" diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py index 4093d33791b5..dba10b4cb224 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py @@ -16,10 +16,8 @@ class DomainModelResults(Model): """Result of image analysis 
using a specific domain model including additional metadata. - :param celebrities: An array of possible celebritied identified in the - image. - :type celebrities: - list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param result: Model-specific response + :type result: object :param request_id: Id of the REST API request. :type request_id: str :param metadata: @@ -28,13 +26,13 @@ class DomainModelResults(Model): """ _attribute_map = { - 'celebrities': {'key': 'result.celebrities', 'type': '[CelebritiesModel]'}, + 'result': {'key': 'result', 'type': 'object'}, 'request_id': {'key': 'requestId', 'type': 'str'}, 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } def __init__(self, **kwargs): super(DomainModelResults, self).__init__(**kwargs) - self.celebrities = kwargs.get('celebrities', None) + self.result = kwargs.get('result', None) self.request_id = kwargs.get('request_id', None) self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py index ea639d2d2fc8..4b718b2258a7 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results_py3.py @@ -16,10 +16,8 @@ class DomainModelResults(Model): """Result of image analysis using a specific domain model including additional metadata. - :param celebrities: An array of possible celebritied identified in the - image. - :type celebrities: - list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param result: Model-specific response + :type result: object :param request_id: Id of the REST API request. 
:type request_id: str :param metadata: @@ -28,13 +26,13 @@ class DomainModelResults(Model): """ _attribute_map = { - 'celebrities': {'key': 'result.celebrities', 'type': '[CelebritiesModel]'}, + 'result': {'key': 'result', 'type': 'object'}, 'request_id': {'key': 'requestId', 'type': 'str'}, 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, *, celebrities=None, request_id: str=None, metadata=None, **kwargs) -> None: + def __init__(self, *, result=None, request_id: str=None, metadata=None, **kwargs) -> None: super(DomainModelResults, self).__init__(**kwargs) - self.celebrities = celebrities + self.result = result self.request_id = request_id self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py new file mode 100644 index 000000000000..14bd0e21d982 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LandmarkResults(Model): + """List of landmarks recognized in the image. + + :param landmarks: + :type landmarks: + list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, **kwargs): + super(LandmarkResults, self).__init__(**kwargs) + self.landmarks = kwargs.get('landmarks', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py new file mode 100644 index 000000000000..fd4de1c7afbc --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LandmarkResultsLandmarksItem(Model): + """A landmark recognized in the image. + + :param name: Name of the landmark. + :type name: str + :param confidence: Confidence level for the landmark recognition. 
+ :type confidence: float + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(LandmarkResultsLandmarksItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py new file mode 100644 index 000000000000..69efe9302103 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LandmarkResultsLandmarksItem(Model): + """A landmark recognized in the image. + + :param name: Name of the landmark. + :type name: str + :param confidence: Confidence level for the landmark recognition. 
+ :type confidence: float + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: + super(LandmarkResultsLandmarksItem, self).__init__(**kwargs) + self.name = name + self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py new file mode 100644 index 000000000000..1df503e9eccf --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LandmarkResults(Model): + """List of landmarks recognized in the image. + + :param landmarks: + :type landmarks: + list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem] + :param request_id: Id of the REST API request. 
+ :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, *, landmarks=None, request_id: str=None, metadata=None, **kwargs) -> None: + super(LandmarkResults, self).__init__(**kwargs) + self.landmarks = landmarks + self.request_id = request_id + self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py index eb573d1338e7..854514692604 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result.py @@ -15,9 +15,8 @@ class OcrResult(Model): """OcrResult. - :param language: - :type language: - ~azure.cognitiveservices.vision.computervision.models.OcrResult + :param language: The BCP-47 language code of the text in the image. + :type language: str :param text_angle: The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. 
After rotating the input image clockwise by this angle, the recognized text lines become @@ -41,7 +40,7 @@ class OcrResult(Model): """ _attribute_map = { - 'language': {'key': 'language', 'type': 'OcrResult'}, + 'language': {'key': 'language', 'type': 'str'}, 'text_angle': {'key': 'textAngle', 'type': 'float'}, 'orientation': {'key': 'orientation', 'type': 'str'}, 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py index 413fb9637d4e..6d654eac5891 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/ocr_result_py3.py @@ -15,9 +15,8 @@ class OcrResult(Model): """OcrResult. - :param language: - :type language: - ~azure.cognitiveservices.vision.computervision.models.OcrResult + :param language: The BCP-47 language code of the text in the image. + :type language: str :param text_angle: The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. 
After rotating the input image clockwise by this angle, the recognized text lines become @@ -41,13 +40,13 @@ class OcrResult(Model): """ _attribute_map = { - 'language': {'key': 'language', 'type': 'OcrResult'}, + 'language': {'key': 'language', 'type': 'str'}, 'text_angle': {'key': 'textAngle', 'type': 'float'}, 'orientation': {'key': 'orientation', 'type': 'str'}, 'regions': {'key': 'regions', 'type': '[OcrRegion]'}, } - def __init__(self, *, language=None, text_angle: float=None, orientation: str=None, regions=None, **kwargs) -> None: + def __init__(self, *, language: str=None, text_angle: float=None, orientation: str=None, regions=None, **kwargs) -> None: super(OcrResult, self).__init__(**kwargs) self.language = language self.text_angle = text_angle From 49ab98bf802b000ed5f2a82dfdd03a2e352392c0 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Tue, 12 Jun 2018 17:12:42 -0700 Subject: [PATCH 3/7] Generated from b35c48945774ac7be9bf0fa05e19c0890141bb3d (#2731) Add Spanish as an option as ServiceLanguage for ComputerVision. --- .../computervision/computer_vision_api.py | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py index 37c45a8a14e1..3d79fee9607d 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py @@ -169,9 +169,9 @@ def analyze_image( ~azure.cognitiveservices.vision.computervision.models.Details] :param language: The desired language for output generation. 
If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -412,9 +412,9 @@ def describe_image( :type max_candidates: str :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -488,9 +488,9 @@ def tag_image( :type url: str :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -564,9 +564,9 @@ def analyze_image_by_domain( :type url: str :param language: The desired language for output generation. 
If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -771,9 +771,9 @@ def analyze_image_in_stream( :type details: str :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1018,9 +1018,9 @@ def describe_image_in_stream( :type max_candidates: str :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
+ Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1097,9 +1097,9 @@ def tag_image_in_stream( :type image: Generator :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. + Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1176,9 +1176,9 @@ def analyze_image_by_domain_in_stream( :type image: Generator :param language: The desired language for output generation. If this parameter is not specified, the default value is - "en".Supported languages:en - English, Default.ja - Japanese - pt - Portuguese zh - Simplified Chinese. Possible values include: - 'en', 'ja', 'pt', 'zh' + "en".Supported languages:en - English, Default. es - + Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
+ Possible values include: 'en', 'es', 'ja', 'pt', 'zh' :type language: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the From bee4351c6a885068f0bc6999985077b325a41513 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Fri, 15 Jun 2018 10:39:36 -0700 Subject: [PATCH 4/7] [AutoPR cognitiveservices/data-plane/ComputerVision] Update CognitiveService ComputerVision API to V2 (#2747) * Generated from c2567da015994dac3301f495b6c5011d28ceaffe Add post-processing directive to swap argument order for RecognizeText to position the url argument ahead of the mode argument. autorest does not generate the intended order, so we add a post-processing directive, for each target language, * Generated from 973322cfd6ec2ced60a732f53b5318ceded5de7b Fix validation errors * Generated from 70efe042d607cbd973734e432da395ed35191a03 Add Spanish support for ServiceLanguage This is for parity with V1. --- .../computervision/computer_vision_api.py | 36 +++++++++---------- .../vision/computervision/models/__init__.py | 14 ++++---- .../computervision/models/category_detail.py | 5 +++ .../models/category_detail_py3.py | 7 +++- .../models/computer_vision_api_enums.py | 6 ++++ .../models/image_description.py | 4 +-- .../models/image_description_details.py | 9 ----- .../models/image_description_details_py3.py | 11 +----- .../models/image_description_py3.py | 4 +-- .../computervision/models/landmark_results.py | 4 +-- .../models/landmark_results_py3.py | 4 +-- ...s_landmarks_item.py => landmarks_model.py} | 4 +-- ...rks_item_py3.py => landmarks_model_py3.py} | 4 +-- .../vision/computervision/version.py | 2 +- 14 files changed, 56 insertions(+), 58 deletions(-) rename azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/{landmark_results_landmarks_item.py => landmarks_model.py} (89%) rename 
azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/{landmark_results_landmarks_item_py3.py => landmarks_model_py3.py} (90%) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py index 3d79fee9607d..e6999202c26b 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/computer_vision_api.py @@ -41,7 +41,7 @@ def __init__( raise ValueError("Parameter 'azure_region' must not be None.") if credentials is None: raise ValueError("Parameter 'credentials' must not be None.") - base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v1.0' + base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v2.0' super(ComputerVisionAPIConfiguration, self).__init__(base_url) @@ -76,7 +76,7 @@ def __init__( super(ComputerVisionAPI, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '1.0' + self.api_version = '2.0' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) @@ -625,18 +625,18 @@ def analyze_image_by_domain( analyze_image_by_domain.metadata = {'url': '/models/{model}/analyze'} def recognize_text( - self, url, detect_handwriting=False, custom_headers=None, raw=False, **operation_config): + self, url, mode, custom_headers=None, raw=False, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your - Get Handwritten Text Operation Result operation. 
+ Get Recognize Text Operation Result operation. + :param mode: Type of text to recognize. Possible values include: + 'Handwritten', 'Printed' + :type mode: str or + ~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode :param url: Publicly reachable URL of an image :type url: str - :param detect_handwriting: If 'true' is specified, handwriting - recognition is performed. If this parameter is set to 'false' or is - not specified, printed text recognition is performed. - :type detect_handwriting: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -658,8 +658,7 @@ def recognize_text( # Construct parameters query_parameters = {} - if detect_handwriting is not None: - query_parameters['detectHandwriting'] = self._serialize.query("detect_handwriting", detect_handwriting, 'bool') + query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode') # Construct headers header_parameters = {} @@ -693,7 +692,7 @@ def get_text_operation_result( returned from Recognize Text interface. :param operation_id: Id of the text operation returned in the response - of the 'Recognize Handwritten Text' + of the 'Recognize Text' :type operation_id: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1240,18 +1239,18 @@ def analyze_image_by_domain_in_stream( analyze_image_by_domain_in_stream.metadata = {'url': '/models/{model}/analyze'} def recognize_text_in_stream( - self, image, detect_handwriting=False, custom_headers=None, raw=False, callback=None, **operation_config): + self, image, mode, custom_headers=None, raw=False, callback=None, **operation_config): """Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your - Get Handwritten Text Operation Result operation. + Get Recognize Text Operation Result operation. :param image: An image stream. :type image: Generator - :param detect_handwriting: If 'true' is specified, handwriting - recognition is performed. If this parameter is set to 'false' or is - not specified, printed text recognition is performed. - :type detect_handwriting: bool + :param mode: Type of text to recognize. Possible values include: + 'Handwritten', 'Printed' + :type mode: str or + ~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1276,8 +1275,7 @@ def recognize_text_in_stream( # Construct parameters query_parameters = {} - if detect_handwriting is not None: - query_parameters['detectHandwriting'] = self._serialize.query("detect_handwriting", detect_handwriting, 'bool') + query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode') # Construct headers header_parameters = {} diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py index f0dde33bd544..001e34518c57 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py @@ -16,6 +16,7 @@ from .text_operation_result_py3 import TextOperationResult from .face_rectangle_py3 import FaceRectangle from .celebrities_model_py3 import CelebritiesModel + from .landmarks_model_py3 import LandmarksModel from .category_detail_py3 import CategoryDetail from .category_py3 import Category from 
.adult_info_py3 import AdultInfo @@ -23,9 +24,9 @@ from .image_type_py3 import ImageType from .image_tag_py3 import ImageTag from .image_caption_py3 import ImageCaption - from .image_metadata_py3 import ImageMetadata from .image_description_details_py3 import ImageDescriptionDetails from .face_description_py3 import FaceDescription + from .image_metadata_py3 import ImageMetadata from .image_analysis_py3 import ImageAnalysis from .ocr_word_py3 import OcrWord from .ocr_line_py3 import OcrLine @@ -35,7 +36,6 @@ from .list_models_result_py3 import ListModelsResult from .domain_model_results_py3 import DomainModelResults from .celebrity_results_py3 import CelebrityResults - from .landmark_results_landmarks_item_py3 import LandmarkResultsLandmarksItem from .landmark_results_py3 import LandmarkResults from .image_description_py3 import ImageDescription from .tag_result_py3 import TagResult @@ -48,6 +48,7 @@ from .text_operation_result import TextOperationResult from .face_rectangle import FaceRectangle from .celebrities_model import CelebritiesModel + from .landmarks_model import LandmarksModel from .category_detail import CategoryDetail from .category import Category from .adult_info import AdultInfo @@ -55,9 +56,9 @@ from .image_type import ImageType from .image_tag import ImageTag from .image_caption import ImageCaption - from .image_metadata import ImageMetadata from .image_description_details import ImageDescriptionDetails from .face_description import FaceDescription + from .image_metadata import ImageMetadata from .image_analysis import ImageAnalysis from .ocr_word import OcrWord from .ocr_line import OcrLine @@ -67,7 +68,6 @@ from .list_models_result import ListModelsResult from .domain_model_results import DomainModelResults from .celebrity_results import CelebrityResults - from .landmark_results_landmarks_item import LandmarkResultsLandmarksItem from .landmark_results import LandmarkResults from .image_description import ImageDescription from .tag_result import 
TagResult @@ -79,6 +79,7 @@ ComputerVisionErrorCodes, VisualFeatureTypes, OcrLanguages, + TextRecognitionMode, AzureRegions, Details, ) @@ -90,6 +91,7 @@ 'TextOperationResult', 'FaceRectangle', 'CelebritiesModel', + 'LandmarksModel', 'CategoryDetail', 'Category', 'AdultInfo', @@ -97,9 +99,9 @@ 'ImageType', 'ImageTag', 'ImageCaption', - 'ImageMetadata', 'ImageDescriptionDetails', 'FaceDescription', + 'ImageMetadata', 'ImageAnalysis', 'OcrWord', 'OcrLine', @@ -109,7 +111,6 @@ 'ListModelsResult', 'DomainModelResults', 'CelebrityResults', - 'LandmarkResultsLandmarksItem', 'LandmarkResults', 'ImageDescription', 'TagResult', @@ -120,6 +121,7 @@ 'ComputerVisionErrorCodes', 'VisualFeatureTypes', 'OcrLanguages', + 'TextRecognitionMode', 'AzureRegions', 'Details', ] diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py index eefbe5b691dc..92c3e355d3e6 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail.py @@ -18,12 +18,17 @@ class CategoryDetail(Model): :param celebrities: An array of celebrities if any identified. :type celebrities: list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param landmarks: An array of landmarks if any identified. 
+ :type landmarks: + list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel] """ _attribute_map = { 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + 'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'}, } def __init__(self, **kwargs): super(CategoryDetail, self).__init__(**kwargs) self.celebrities = kwargs.get('celebrities', None) + self.landmarks = kwargs.get('landmarks', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py index b155197dab02..3e4350314499 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_detail_py3.py @@ -18,12 +18,17 @@ class CategoryDetail(Model): :param celebrities: An array of celebrities if any identified. :type celebrities: list[~azure.cognitiveservices.vision.computervision.models.CelebritiesModel] + :param landmarks: An array of landmarks if any identified. 
+ :type landmarks: + list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel] """ _attribute_map = { 'celebrities': {'key': 'celebrities', 'type': '[CelebritiesModel]'}, + 'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'}, } - def __init__(self, *, celebrities=None, **kwargs) -> None: + def __init__(self, *, celebrities=None, landmarks=None, **kwargs) -> None: super(CategoryDetail, self).__init__(**kwargs) self.celebrities = celebrities + self.landmarks = landmarks diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py index 76ff22284087..66fb4dc38436 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py @@ -85,6 +85,12 @@ class OcrLanguages(str, Enum): sk = "sk" +class TextRecognitionMode(str, Enum): + + handwritten = "Handwritten" + printed = "Printed" + + class AzureRegions(str, Enum): westus = "westus" diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py index 6a84422b6b65..18fbae6eb962 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description.py @@ -31,8 +31,8 @@ class ImageDescription(Model): _attribute_map = { 'tags': {'key': 'description.tags', 'type': '[str]'}, 'captions': {'key': 'description.captions', 
'type': '[ImageCaption]'}, - 'request_id': {'key': 'description.requestId', 'type': 'str'}, - 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } def __init__(self, **kwargs): diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py index 1e6afbb99ed7..f6b7524d1543 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details.py @@ -21,23 +21,14 @@ class ImageDescriptionDetails(Model): :param captions: A list of captions, sorted by confidence level. :type captions: list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] - :param request_id: Id of the REST API request. 
- :type request_id: str - :param metadata: - :type metadata: - ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ _attribute_map = { 'tags': {'key': 'tags', 'type': '[str]'}, 'captions': {'key': 'captions', 'type': '[ImageCaption]'}, - 'request_id': {'key': 'requestId', 'type': 'str'}, - 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } def __init__(self, **kwargs): super(ImageDescriptionDetails, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.captions = kwargs.get('captions', None) - self.request_id = kwargs.get('request_id', None) - self.metadata = kwargs.get('metadata', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py index 702d4ac029de..dcb990fc688e 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_details_py3.py @@ -21,23 +21,14 @@ class ImageDescriptionDetails(Model): :param captions: A list of captions, sorted by confidence level. :type captions: list[~azure.cognitiveservices.vision.computervision.models.ImageCaption] - :param request_id: Id of the REST API request. 
- :type request_id: str - :param metadata: - :type metadata: - ~azure.cognitiveservices.vision.computervision.models.ImageMetadata """ _attribute_map = { 'tags': {'key': 'tags', 'type': '[str]'}, 'captions': {'key': 'captions', 'type': '[ImageCaption]'}, - 'request_id': {'key': 'requestId', 'type': 'str'}, - 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } - def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: + def __init__(self, *, tags=None, captions=None, **kwargs) -> None: super(ImageDescriptionDetails, self).__init__(**kwargs) self.tags = tags self.captions = captions - self.request_id = request_id - self.metadata = metadata diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py index 3ec3fa9c951a..0874f3c62e01 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_description_py3.py @@ -31,8 +31,8 @@ class ImageDescription(Model): _attribute_map = { 'tags': {'key': 'description.tags', 'type': '[str]'}, 'captions': {'key': 'description.captions', 'type': '[ImageCaption]'}, - 'request_id': {'key': 'description.requestId', 'type': 'str'}, - 'metadata': {'key': 'description.metadata', 'type': 'ImageMetadata'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } def __init__(self, *, tags=None, captions=None, request_id: str=None, metadata=None, **kwargs) -> None: diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py 
b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py index 14bd0e21d982..7b0c45eb32c6 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results.py @@ -17,7 +17,7 @@ class LandmarkResults(Model): :param landmarks: :type landmarks: - list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem] + list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel] :param request_id: Id of the REST API request. :type request_id: str :param metadata: @@ -26,7 +26,7 @@ class LandmarkResults(Model): """ _attribute_map = { - 'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'}, + 'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'}, 'request_id': {'key': 'requestId', 'type': 'str'}, 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py index 1df503e9eccf..93228b4cf03f 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_py3.py @@ -17,7 +17,7 @@ class LandmarkResults(Model): :param landmarks: :type landmarks: - list[~azure.cognitiveservices.vision.computervision.models.LandmarkResultsLandmarksItem] + list[~azure.cognitiveservices.vision.computervision.models.LandmarksModel] :param request_id: Id of the REST API request. 
:type request_id: str :param metadata: @@ -26,7 +26,7 @@ class LandmarkResults(Model): """ _attribute_map = { - 'landmarks': {'key': 'landmarks', 'type': '[LandmarkResultsLandmarksItem]'}, + 'landmarks': {'key': 'landmarks', 'type': '[LandmarksModel]'}, 'request_id': {'key': 'requestId', 'type': 'str'}, 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, } diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model.py similarity index 89% rename from azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py rename to azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model.py index fd4de1c7afbc..74ff012df89a 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model.py @@ -12,7 +12,7 @@ from msrest.serialization import Model -class LandmarkResultsLandmarksItem(Model): +class LandmarksModel(Model): """A landmark recognized in the image. :param name: Name of the landmark. 
@@ -27,6 +27,6 @@ class LandmarkResultsLandmarksItem(Model): } def __init__(self, **kwargs): - super(LandmarkResultsLandmarksItem, self).__init__(**kwargs) + super(LandmarksModel, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.confidence = kwargs.get('confidence', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model_py3.py similarity index 90% rename from azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py rename to azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model_py3.py index 69efe9302103..9b7ed4502168 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmark_results_landmarks_item_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/landmarks_model_py3.py @@ -12,7 +12,7 @@ from msrest.serialization import Model -class LandmarkResultsLandmarksItem(Model): +class LandmarksModel(Model): """A landmark recognized in the image. :param name: Name of the landmark. 
@@ -27,6 +27,6 @@ class LandmarkResultsLandmarksItem(Model): } def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: - super(LandmarkResultsLandmarksItem, self).__init__(**kwargs) + super(LandmarksModel, self).__init__(**kwargs) self.name = name self.confidence = confidence diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py index 63d89bfb54fa..63f83465c874 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py @@ -9,5 +9,5 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0" +VERSION = "2.0" From 6b6469b30ba88c7bc9331088d739ca3dd404f802 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Wed, 20 Jun 2018 10:43:28 -0700 Subject: [PATCH 5/7] Generated from 6492fb4e04a3e5d7330ceccbb9f67e2a839e2290 (#2775) ComputerVision add tag.hint Hints provided additional info for whole-image analysis tags. This PR is to make the Swagger match the service behavior. 
--- .../vision/computervision/models/image_tag.py | 4 ++++ .../vision/computervision/models/image_tag_py3.py | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py index 93f349e9d4c0..8e981d8e6121 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag.py @@ -19,14 +19,18 @@ class ImageTag(Model): :type name: str :param confidence: The level of confidence the service has in the caption :type confidence: float + :param hint: Optional categorization for the tag + :type hint: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'confidence': {'key': 'confidence', 'type': 'float'}, + 'hint': {'key': 'hint', 'type': 'str'}, } def __init__(self, **kwargs): super(ImageTag, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.confidence = kwargs.get('confidence', None) + self.hint = kwargs.get('hint', None) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py index ed598af42843..62ba57b775b2 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/image_tag_py3.py @@ -19,14 +19,18 @@ class ImageTag(Model): :type name: str :param confidence: The level of confidence the service has in the caption :type confidence: float + :param hint: Optional categorization for 
the tag + :type hint: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'confidence': {'key': 'confidence', 'type': 'float'}, + 'hint': {'key': 'hint', 'type': 'str'}, } - def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: + def __init__(self, *, name: str=None, confidence: float=None, hint: str=None, **kwargs) -> None: super(ImageTag, self).__init__(**kwargs) self.name = name self.confidence = confidence + self.hint = hint From 4902e741d2bb2562eb6824c8d86a85490868bca3 Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 21 Jun 2018 16:55:28 -0700 Subject: [PATCH 6/7] Update version.py --- .../azure/cognitiveservices/vision/computervision/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py index 63f83465c874..9bd1dfac7ecb 100644 --- a/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py +++ b/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "2.0" +VERSION = "0.2.0" From d5038d01e778b336f59f2525668c84a909995773 Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Fri, 22 Jun 2018 12:20:24 -0700 Subject: [PATCH 7/7] ComputerVision 0.2.0 packaging --- .../HISTORY.rst | 42 ++ .../README.rst | 16 +- .../build.json | 420 ------------------ .../sdk_packaging.toml | 6 + .../setup.py | 3 +- azure-sdk-tools/packaging_tools/__init__.py | 2 + .../packaging_tools/templates/README.rst | 8 +- 7 files changed, 64 insertions(+), 433 deletions(-) delete mode 100644 azure-cognitiveservices-vision-computervision/build.json create mode 100644 azure-cognitiveservices-vision-computervision/sdk_packaging.toml diff --git a/azure-cognitiveservices-vision-computervision/HISTORY.rst b/azure-cognitiveservices-vision-computervision/HISTORY.rst index 09cb4bc66645..399421196d94 100644 --- a/azure-cognitiveservices-vision-computervision/HISTORY.rst +++ b/azure-cognitiveservices-vision-computervision/HISTORY.rst @@ -3,6 +3,48 @@ Release History =============== +0.2.0 (2018-06-22) +++++++++++++++++++ + +**Features** + +- analyze_image now support 'en', 'es', 'ja', 'pt', 'zh' (including "in_stream" version of these operations) +- describe_image/tag_image/analyze_image_by_domain now support the language parameter (including "in_stream" version of these operations) +- Client class can be used as a context manager to keep the underlying HTTP session open for performance + +**Bug fixes** + +- Fix several invalid JSON description, that was raising unexpected exceptions (including OCRResult from bug #2614) + +**Breaking changes** + +- recognize_text "detect_handwriting" boolean is now a "mode" str between 'Handwritten' and 'Printed' + +**General Breaking changes** + +This version uses a next-generation code generator that *might* introduce breaking changes. + +- Model signatures now use only keyword-argument syntax. 
All positional arguments must be re-written as keyword-arguments. + To keep auto-completion in most cases, models are now generated for Python 2 and Python 3. Python 3 uses the "*" syntax for keyword-only arguments. +- Enum types now use the "str" mixin (class AzureEnum(str, Enum)) to improve the behavior when unrecognized enum values are encountered. + While this is not a breaking change, the distinctions are important, and are documented here: + https://docs.python.org/3/library/enum.html#others + At a glance: + + - "is" should not be used at all. + - "format" will return the string value, where "%s" string formatting will return `NameOfEnum.stringvalue`. Format syntax should be preferred. + +- New Long Running Operation: + + - Return type changes from `msrestazure.azure_operation.AzureOperationPoller` to `msrest.polling.LROPoller`. External API is the same. + - Return type is now **always** a `msrest.polling.LROPoller`, regardless of the optional parameters used. + - The behavior has changed when using `raw=True`. Instead of returning the initial call result as `ClientRawResponse`, + without polling, now this returns an LROPoller. After polling, the final resource will be returned as a `ClientRawResponse`. + - New `polling` parameter. The default behavior is `Polling=True` which will poll using ARM algorithm. When `Polling=False`, + the response of the initial call will be returned without polling. + - `polling` parameter accepts instances of subclasses of `msrest.polling.PollingMethod`. + - `add_done_callback` will no longer raise if called after polling is finished, but will instead execute the callback right away.
+ 0.1.0 (2018-01-23) ++++++++++++++++++ diff --git a/azure-cognitiveservices-vision-computervision/README.rst b/azure-cognitiveservices-vision-computervision/README.rst index 988da510b326..ee1ba86cf7c0 100644 --- a/azure-cognitiveservices-vision-computervision/README.rst +++ b/azure-cognitiveservices-vision-computervision/README.rst @@ -3,13 +3,7 @@ Microsoft Azure SDK for Python This is the Microsoft Azure Cognitive Services Computer Vision Client Library. -Azure Resource Manager (ARM) is the next generation of management APIs that -replace the old Azure Service Management (ASM). - -This package has been tested with Python 2.7, 3.3, 3.4, 3.5 and 3.6. - -For the older Azure Service Management (ASM) libraries, see -`azure-servicemanagement-legacy `__ library. +This package has been tested with Python 2.7, 3.4, 3.5 and 3.6. For a more complete set of Azure libraries, see the `azure `__ bundle package. @@ -33,6 +27,14 @@ If you see azure==0.11.0 (or any version below 1.0), uninstall it first: pip uninstall azure +Usage +===== + +For code examples, see `Cognitive Services Computer Vision +`__ +on docs.microsoft.com. 
+ + Provide Feedback ================ diff --git a/azure-cognitiveservices-vision-computervision/build.json b/azure-cognitiveservices-vision-computervision/build.json deleted file mode 100644 index 94bc647f855d..000000000000 --- a/azure-cognitiveservices-vision-computervision/build.json +++ /dev/null @@ -1,420 +0,0 @@ -{ - "autorest": [ - { - "resolvedInfo": null, - "packageMetadata": { - "name": "@microsoft.azure/autorest-core", - "version": "2.0.4228", - "engines": { - "node": ">=7.10.0" - }, - "dependencies": {}, - "optionalDependencies": {}, - "devDependencies": { - "@types/commonmark": "^0.27.0", - "@types/js-yaml": "^3.10.0", - "@types/jsonpath": "^0.1.29", - "@types/node": "^8.0.53", - "@types/source-map": "^0.5.0", - "@types/yargs": "^8.0.2", - "@types/z-schema": "^3.16.31", - "dts-generator": "^2.1.0", - "mocha": "^4.0.1", - "mocha-typescript": "^1.1.7", - "shx": "0.2.2", - "static-link": "^0.2.3", - "vscode-jsonrpc": "^3.3.1" - }, - "bundleDependencies": false, - "peerDependencies": {}, - "deprecated": false, - "_resolved": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4228/node_modules/@microsoft.azure/autorest-core", - "_shasum": "b3897b8615417aa07cf9113d4bd18862b32f82f8", - "_shrinkwrap": null, - "bin": { - "autorest-core": "./dist/app.js", - "autorest-language-service": "dist/language-service/language-service.js" - }, - "_id": "@microsoft.azure/autorest-core@2.0.4228", - "_from": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4228/node_modules/@microsoft.azure/autorest-core", - "_requested": { - "type": "directory", - "where": "D:\\VSProjects\\swagger-to-sdk", - "raw": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4228\\node_modules\\@microsoft.azure\\autorest-core", - "rawSpec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4228\\node_modules\\@microsoft.azure\\autorest-core", - "saveSpec": 
"file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4228/node_modules/@microsoft.azure/autorest-core", - "fetchSpec": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4228/node_modules/@microsoft.azure/autorest-core" - }, - "_spec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4228\\node_modules\\@microsoft.azure\\autorest-core", - "_where": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4228\\node_modules\\@microsoft.azure\\autorest-core" - }, - "extensionManager": { - "installationPath": "C:\\Users\\lmazuel\\.autorest", - "sharedLock": { - "name": "C:\\Users\\lmazuel\\.autorest", - "exclusiveLock": { - "name": "C__Users_lmazuel_.autorest.exclusive-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "exclusive": true - } - }, - "busyLock": { - "name": "C__Users_lmazuel_.autorest.busy-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "exclusive": true - } - }, - "personalLock": { - "name": "C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "exclusive": true - } - }, - "file": "C:\\Users\\lmazuel\\AppData\\Local\\Temp/C__Users_lmazuel_.autorest.lock" - }, - "dotnetPath": "C:\\Users\\lmazuel\\.dotnet" - }, - "installationPath": "C:\\Users\\lmazuel\\.autorest" - }, - { - "resolvedInfo": null, - "packageMetadata": { - "name": "@microsoft.azure/autorest-core", - "version": "2.0.4230", - "engines": { - "node": ">=7.10.0" - }, - "dependencies": {}, - "optionalDependencies": {}, - "devDependencies": { - "@types/commonmark": "^0.27.0", - "@types/js-yaml": "^3.10.0", - "@types/jsonpath": "^0.1.29", - 
"@types/node": "^8.0.53", - "@types/source-map": "0.5.0", - "@types/yargs": "^8.0.2", - "@types/z-schema": "^3.16.31", - "dts-generator": "^2.1.0", - "mocha": "^4.0.1", - "mocha-typescript": "^1.1.7", - "shx": "0.2.2", - "static-link": "^0.2.3", - "vscode-jsonrpc": "^3.3.1" - }, - "bundleDependencies": false, - "peerDependencies": {}, - "deprecated": false, - "_resolved": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4230/node_modules/@microsoft.azure/autorest-core", - "_shasum": "3c9b387fbe18ce3a54b68bc0c24ddb0a4c850f25", - "_shrinkwrap": null, - "bin": { - "autorest-core": "./dist/app.js", - "autorest-language-service": "dist/language-service/language-service.js" - }, - "_id": "@microsoft.azure/autorest-core@2.0.4230", - "_from": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4230/node_modules/@microsoft.azure/autorest-core", - "_requested": { - "type": "directory", - "where": "D:\\VSProjects\\swagger-to-sdk", - "raw": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4230\\node_modules\\@microsoft.azure\\autorest-core", - "rawSpec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4230\\node_modules\\@microsoft.azure\\autorest-core", - "saveSpec": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4230/node_modules/@microsoft.azure/autorest-core", - "fetchSpec": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest-core@2.0.4230/node_modules/@microsoft.azure/autorest-core" - }, - "_spec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4230\\node_modules\\@microsoft.azure\\autorest-core", - "_where": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest-core@2.0.4230\\node_modules\\@microsoft.azure\\autorest-core" - }, - "extensionManager": { - "installationPath": "C:\\Users\\lmazuel\\.autorest", - "sharedLock": { - "name": "C:\\Users\\lmazuel\\.autorest", - "exclusiveLock": { - "name": "C__Users_lmazuel_.autorest.exclusive-lock", - "pipe": 
"\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "exclusive": true - } - }, - "busyLock": { - "name": "C__Users_lmazuel_.autorest.busy-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "exclusive": true - } - }, - "personalLock": { - "name": "C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "exclusive": true - } - }, - "file": "C:\\Users\\lmazuel\\AppData\\Local\\Temp/C__Users_lmazuel_.autorest.lock" - }, - "dotnetPath": "C:\\Users\\lmazuel\\.dotnet" - }, - "installationPath": "C:\\Users\\lmazuel\\.autorest" - }, - { - "resolvedInfo": null, - "packageMetadata": { - "name": "@microsoft.azure/autorest.modeler", - "version": "2.3.38", - "dependencies": { - "dotnet-2.0.0": "^1.4.4" - }, - "optionalDependencies": {}, - "devDependencies": { - "@microsoft.azure/autorest.testserver": "2.3.1", - "autorest": "^2.0.4201", - "coffee-script": "^1.11.1", - "dotnet-sdk-2.0.0": "^1.4.4", - "gulp": "^3.9.1", - "gulp-filter": "^5.0.0", - "gulp-line-ending-corrector": "^1.0.1", - "iced-coffee-script": "^108.0.11", - "marked": "^0.3.6", - "marked-terminal": "^2.0.0", - "moment": "^2.17.1", - "run-sequence": "*", - "shx": "^0.2.2", - "through2-parallel": "^0.1.3", - "yargs": "^8.0.2", - "yarn": "^1.0.2" - }, - "bundleDependencies": false, - "peerDependencies": {}, - "deprecated": false, - "_resolved": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", - "_shasum": "903bb77932e4ed1b8bc3b25cc39b167143494f6c", - "_shrinkwrap": null, - "bin": null, - "_id": "@microsoft.azure/autorest.modeler@2.3.38", - "_from": 
"file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", - "_requested": { - "type": "directory", - "where": "D:\\VSProjects\\swagger-to-sdk", - "raw": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.38\\node_modules\\@microsoft.azure\\autorest.modeler", - "rawSpec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.38\\node_modules\\@microsoft.azure\\autorest.modeler", - "saveSpec": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", - "fetchSpec": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler" - }, - "_spec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.38\\node_modules\\@microsoft.azure\\autorest.modeler", - "_where": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.38\\node_modules\\@microsoft.azure\\autorest.modeler" - }, - "extensionManager": { - "installationPath": "C:\\Users\\lmazuel\\.autorest", - "sharedLock": { - "name": "C:\\Users\\lmazuel\\.autorest", - "exclusiveLock": { - "name": "C__Users_lmazuel_.autorest.exclusive-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "exclusive": true - } - }, - "busyLock": { - "name": "C__Users_lmazuel_.autorest.busy-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "exclusive": true - } - }, - "personalLock": { - "name": "C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "exclusive": true - } - }, - "file": 
"C:\\Users\\lmazuel\\AppData\\Local\\Temp/C__Users_lmazuel_.autorest.lock" - }, - "dotnetPath": "C:\\Users\\lmazuel\\.dotnet" - }, - "installationPath": "C:\\Users\\lmazuel\\.autorest" - }, - { - "resolvedInfo": null, - "packageMetadata": { - "name": "@microsoft.azure/autorest.modeler", - "version": "2.3.44", - "dependencies": { - "dotnet-2.0.0": "^1.4.4" - }, - "optionalDependencies": {}, - "devDependencies": { - "@microsoft.azure/autorest.testserver": "2.3.17", - "autorest": "^2.0.4225", - "coffee-script": "^1.11.1", - "dotnet-sdk-2.0.0": "^1.4.4", - "gulp": "^3.9.1", - "gulp-filter": "^5.0.0", - "gulp-line-ending-corrector": "^1.0.1", - "iced-coffee-script": "^108.0.11", - "marked": "^0.3.6", - "marked-terminal": "^2.0.0", - "moment": "^2.17.1", - "run-sequence": "*", - "shx": "^0.2.2", - "through2-parallel": "^0.1.3", - "yargs": "^8.0.2", - "yarn": "^1.0.2" - }, - "bundleDependencies": false, - "peerDependencies": {}, - "deprecated": false, - "_resolved": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", - "_shasum": "9b5a880a77467be33a77f002f03230d3ccc21266", - "_shrinkwrap": null, - "bin": null, - "_id": "@microsoft.azure/autorest.modeler@2.3.44", - "_from": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", - "_requested": { - "type": "directory", - "where": "D:\\VSProjects\\swagger-to-sdk", - "raw": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.44\\node_modules\\@microsoft.azure\\autorest.modeler", - "rawSpec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.44\\node_modules\\@microsoft.azure\\autorest.modeler", - "saveSpec": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", - "fetchSpec": 
"C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler" - }, - "_spec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.44\\node_modules\\@microsoft.azure\\autorest.modeler", - "_where": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.modeler@2.3.44\\node_modules\\@microsoft.azure\\autorest.modeler" - }, - "extensionManager": { - "installationPath": "C:\\Users\\lmazuel\\.autorest", - "sharedLock": { - "name": "C:\\Users\\lmazuel\\.autorest", - "exclusiveLock": { - "name": "C__Users_lmazuel_.autorest.exclusive-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "exclusive": true - } - }, - "busyLock": { - "name": "C__Users_lmazuel_.autorest.busy-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "exclusive": true - } - }, - "personalLock": { - "name": "C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "exclusive": true - } - }, - "file": "C:\\Users\\lmazuel\\AppData\\Local\\Temp/C__Users_lmazuel_.autorest.lock" - }, - "dotnetPath": "C:\\Users\\lmazuel\\.dotnet" - }, - "installationPath": "C:\\Users\\lmazuel\\.autorest" - }, - { - "resolvedInfo": null, - "packageMetadata": { - "name": "@microsoft.azure/autorest.python", - "version": "2.1.34", - "dependencies": { - "dotnet-2.0.0": "^1.4.4" - }, - "optionalDependencies": {}, - "devDependencies": { - "@microsoft.azure/autorest.testserver": "^2.3.13", - "autorest": "^2.0.4203", - "coffee-script": "^1.11.1", - "dotnet-sdk-2.0.0": "^1.4.4", - "gulp": "^3.9.1", - "gulp-filter": "^5.0.0", - "gulp-line-ending-corrector": "^1.0.1", - 
"iced-coffee-script": "^108.0.11", - "marked": "^0.3.6", - "marked-terminal": "^2.0.0", - "moment": "^2.17.1", - "run-sequence": "*", - "shx": "^0.2.2", - "through2-parallel": "^0.1.3", - "yargs": "^8.0.2", - "yarn": "^1.0.2" - }, - "bundleDependencies": false, - "peerDependencies": {}, - "deprecated": false, - "_resolved": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.python@2.1.34/node_modules/@microsoft.azure/autorest.python", - "_shasum": "b58d7e0542e081cf410fdbcdf8c14acf9cee16a7", - "_shrinkwrap": null, - "bin": null, - "_id": "@microsoft.azure/autorest.python@2.1.34", - "_from": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.python@2.1.34/node_modules/@microsoft.azure/autorest.python", - "_requested": { - "type": "directory", - "where": "D:\\VSProjects\\swagger-to-sdk", - "raw": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.python@2.1.34\\node_modules\\@microsoft.azure\\autorest.python", - "rawSpec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.python@2.1.34\\node_modules\\@microsoft.azure\\autorest.python", - "saveSpec": "file:C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.python@2.1.34/node_modules/@microsoft.azure/autorest.python", - "fetchSpec": "C:/Users/lmazuel/.autorest/@microsoft.azure_autorest.python@2.1.34/node_modules/@microsoft.azure/autorest.python" - }, - "_spec": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.python@2.1.34\\node_modules\\@microsoft.azure\\autorest.python", - "_where": "C:\\Users\\lmazuel\\.autorest\\@microsoft.azure_autorest.python@2.1.34\\node_modules\\@microsoft.azure\\autorest.python" - }, - "extensionManager": { - "installationPath": "C:\\Users\\lmazuel\\.autorest", - "sharedLock": { - "name": "C:\\Users\\lmazuel\\.autorest", - "exclusiveLock": { - "name": "C__Users_lmazuel_.autorest.exclusive-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.exclusive-lock", - "exclusive": 
true - } - }, - "busyLock": { - "name": "C__Users_lmazuel_.autorest.busy-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.busy-lock", - "exclusive": true - } - }, - "personalLock": { - "name": "C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "pipe": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "options": { - "path": "\\\\.\\pipe\\C__Users_lmazuel_.autorest.8624.727883310885.personal-lock", - "exclusive": true - } - }, - "file": "C:\\Users\\lmazuel\\AppData\\Local\\Temp/C__Users_lmazuel_.autorest.lock" - }, - "dotnetPath": "C:\\Users\\lmazuel\\.dotnet" - }, - "installationPath": "C:\\Users\\lmazuel\\.autorest" - } - ], - "autorest_bootstrap": { - "dependencies": { - "autorest": { - "version": "2.0.4215", - "from": "autorest@latest", - "resolved": "https://registry.npmjs.org/autorest/-/autorest-2.0.4215.tgz" - } - } - } -} \ No newline at end of file diff --git a/azure-cognitiveservices-vision-computervision/sdk_packaging.toml b/azure-cognitiveservices-vision-computervision/sdk_packaging.toml new file mode 100644 index 000000000000..069c9df58b06 --- /dev/null +++ b/azure-cognitiveservices-vision-computervision/sdk_packaging.toml @@ -0,0 +1,6 @@ +[packaging] +package_name = "azure-cognitiveservices-vision-computervision" +package_pprint_name = "Cognitive Services Computer Vision" +package_doc_id = "cognitive-services" +is_stable = false +is_arm = false diff --git a/azure-cognitiveservices-vision-computervision/setup.py b/azure-cognitiveservices-vision-computervision/setup.py index bb70be9050d9..7402c1967c37 100644 --- a/azure-cognitiveservices-vision-computervision/setup.py +++ b/azure-cognitiveservices-vision-computervision/setup.py @@ -69,7 +69,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: 
Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', @@ -78,7 +77,7 @@ zip_safe=False, packages=find_packages(exclude=["tests"]), install_requires=[ - 'msrest>=0.4.24,<2.0.0', + 'msrest>=0.4.29,<2.0.0', 'azure-common~=1.1', ], cmdclass=cmdclass diff --git a/azure-sdk-tools/packaging_tools/__init__.py b/azure-sdk-tools/packaging_tools/__init__.py index ab7099952f52..f49b398e93ce 100644 --- a/azure-sdk-tools/packaging_tools/__init__.py +++ b/azure-sdk-tools/packaging_tools/__init__.py @@ -23,6 +23,8 @@ def build_config(config : Dict[str, Any]) -> Dict[str, str]: # Manage the nspkg package_name = result["package_name"] result["package_nspkg"] = package_name[:package_name.rindex('-')]+"-nspkg" + # ARM? + result['is_arm'] = result.pop("is_arm", True) # Return result return result diff --git a/azure-sdk-tools/packaging_tools/templates/README.rst b/azure-sdk-tools/packaging_tools/templates/README.rst index 41025dfbb229..107264a8dc0f 100644 --- a/azure-sdk-tools/packaging_tools/templates/README.rst +++ b/azure-sdk-tools/packaging_tools/templates/README.rst @@ -2,15 +2,15 @@ Microsoft Azure SDK for Python ============================== This is the Microsoft Azure {{package_pprint_name}} Client Library. - +{% if is_arm %} Azure Resource Manager (ARM) is the next generation of management APIs that replace the old Azure Service Management (ASM). - +{% endif %} This package has been tested with Python 2.7, 3.4, 3.5 and 3.6. - +{% if is_arm %} For the older Azure Service Management (ASM) libraries, see `azure-servicemanagement-legacy `__ library. - +{% endif %} For a more complete set of Azure libraries, see the `azure `__ bundle package.