diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.md index 206d9180fb5f..d6d48bbeec72 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/readme.md +++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.md @@ -4,21 +4,21 @@ Configuration for generating Computer Vision SDK. -The current release is `release_1_0`. +The current release is `release_2_0`. ``` yaml -tag: release_1_0 +tag: release_2_0 add-credentials: true openapi-type: data-plane ``` # Releases -### Release 1.0 -These settings apply only when `--tag=release_1_0` is specified on the command line. +### Release 2.0 +These settings apply only when `--tag=release_2_0` is specified on the command line. -``` yaml $(tag) == 'release_1_0' -input-file: stable/v1.0/ComputerVision.json +``` yaml $(tag) == 'release_2_0' +input-file: stable/v2.0/ComputerVision.json ``` ## Swagger to SDK @@ -48,6 +48,13 @@ csharp: namespace: Microsoft.Azure.CognitiveServices.Vision.ComputerVision output-folder: $(csharp-sdks-folder)/CognitiveServices/dataPlane/Vision/ComputerVision/ComputerVision/Generated clear-output-folder: true + +directive: + from: source-file-csharp + where: $ + transform: > + $ = $.replace( /TextRecognitionMode mode, string url,/g, "string url, TextRecognitionMode mode," ); + $ = $.replace( /mode, url,/g, "url, mode," ); ``` ## Python @@ -65,6 +72,12 @@ python: namespace: azure.cognitiveservices.vision.computervision package-name: azure-cognitiveservices-vision-computervision clear-output-folder: true + +directive: + from: source-file-python + where: $ + transform: > + $ = $.replace( /self, mode, url,/g, "self, url, mode," ); ``` ``` yaml $(python) && $(python-mode) == 'update' python: @@ -92,16 +105,16 @@ go: ``` yaml $(go) && $(multiapi) batch: - - tag: release_1_0 + - tag: release_2_0 ``` -### Tag: release_1_0 and go +### Tag: release_2_0 and go -These settings apply only when `--tag=release_1_0 --go` is specified on the command line. +These settings apply only when `--tag=release_2_0 --go` is specified on the command line. Please also specify `--go-sdk-folder=`. 
-``` yaml $(tag) == 'release_1_0' && $(go) -output-folder: $(go-sdk-folder)/services/cognitiveservices/v1.0/computervision +``` yaml $(tag) == 'release_2_0' && $(go) +output-folder: $(go-sdk-folder)/services/cognitiveservices/v2.0/computervision ``` @@ -119,4 +132,11 @@ java: output-folder: $(azure-libraries-for-java-folder)/azure-cognitiveservices/vision/computervision with-optional-parameters: true with-single-async-method: true + +directive: + from: source-file-java + where: $ + transform: > + $ = $.replace( /TextRecognitionMode mode, String url/g, "String url, TextRecognitionMode mode" ); + $ = $.replace( /recognizeTextWithServiceResponseAsync\(mode, url\)/g, "recognizeTextWithServiceResponseAsync(url, mode)" ) ``` diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.nodejs.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.nodejs.md index 02fef39a36dd..d7b461ede301 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/readme.nodejs.md +++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.nodejs.md @@ -6,11 +6,18 @@ Please also specify `--node-sdks-folder= + $ = $.replace( /mode: string, url: string/g, "url: string, mode: string" ); + $ = $.replace( /mode, url/g, "url, mode" ); ``` diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md index 25622a455949..630d4da7ee5d 100644 --- a/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md +++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md @@ -7,22 +7,28 @@ ruby: package-name: azure_cognitiveservices_computervision package-version: "0.16.0" azure-arm: true + +directive: + from: source-file-ruby + where: $ + transform: > + $ = $.replace( /mode, url/g, "url, mode" ); ``` ### Ruby multi-api ``` yaml $(ruby) && $(multiapi) batch: - - tag: release_1_0 + - tag: release_2_0 ``` -### Tag: release_1_0 and ruby +### Tag: release_2_0 and ruby -These settings apply only when `--tag=release_1_0 --ruby` is specified on the command line. +These settings apply only when `--tag=release_2_0 --ruby` is specified on the command line. Please also specify `--ruby-sdks-folder=`. -``` yaml $(tag) == 'release_1_0' && $(ruby) -namespace: "Azure::CognitiveServices::ComputerVision::V1_0" +``` yaml $(tag) == 'release_2_0' && $(ruby) +namespace: "Azure::CognitiveServices::ComputerVision::V2_0" output-folder: $(ruby-sdks-folder)/data/azure_cognitiveservices_computervision/lib title: "ComputerVisionClient" ``` diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/ComputerVision.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/ComputerVision.json new file mode 100644 index 000000000000..afc702a14218 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/ComputerVision.json @@ -0,0 +1,1504 @@ +{ + "swagger": "2.0", + "info": { + "version": "2.0", + "title": "Computer Vision API", + "description": "The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. 
Additionally, it can intelligently generate image thumbnails for displaying large images effectively."
+ },
+ "securityDefinitions": {
+ "apim_key": {
+ "type": "apiKey",
+ "name": "Ocp-Apim-Subscription-Key",
+ "in": "header"
+ }
+ },
+ "security": [
+ {
+ "apim_key": []
+ }
+ ],
+ "x-ms-parameterized-host": {
+ "hostTemplate": "{AzureRegion}.api.cognitive.microsoft.com",
+ "parameters": [
+ {
+ "$ref": "../../../Common/ExtendedRegions.json#/parameters/AzureRegion"
+ }
+ ]
+ },
+ "basePath": "/vision/v2.0",
+ "schemes": [
+ "https"
+ ],
+ "paths": {
+ "/models": {
+ "get": {
+ "description": "This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.",
+ "operationId": "ListModels",
+ "produces": [
+ "application/json"
+ ],
+ "responses": {
+ "200": {
+ "description": "List of available domain models.",
+ "schema": {
+ "$ref": "#/definitions/ListModelsResult"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful List Domains request": {
+ "$ref": "./examples/SuccessfulListDomainModels.json"
+ }
+ }
+ }
+ },
+ "/analyze": {
+ "post": {
+ "description": "This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response.",
+ "operationId": "AnalyzeImage",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "$ref": "#/parameters/VisualFeatures"
+ },
+ {
+ "name": "details",
+ "in": "query",
+ "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image.",
+ "type": "array",
+ "required": false,
+ "collectionFormat": "csv",
+ "items": {
+ "type": "string",
+ "x-nullable": false,
+ "x-ms-enum": {
+ "name": "Details",
+ "modelAsString": false
+ },
+ "enum": [
+ "Celebrities",
+ "Landmarks"
+ ]
+ }
+ },
+ {
+ "$ref": "#/parameters/ServiceLanguage"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The response includes the extracted features in JSON format. Here are the definitions for the enumeration types: ClipartType: Non-clipart = 0, ambiguous = 1, normal-clipart = 2, good-clipart = 3. LineDrawingType: Non-LineDrawing = 0, LineDrawing = 1.",
+ "schema": {
+ "$ref": "#/definitions/ImageAnalysis"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Analyze with Url request": {
+ "$ref": "./examples/SuccessfulAnalyzeWithUrl.json"
+ }
+ }
+ }
+ },
+ "/generateThumbnail": {
+ "post": {
+ "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI.
Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.",
+ "operationId": "GenerateThumbnail",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/octet-stream"
+ ],
+ "parameters": [
+ {
+ "name": "width",
+ "type": "integer",
+ "in": "query",
+ "required": true,
+ "minimum": 1,
+ "maximum": 1023,
+ "description": "Width of the thumbnail. It must be between 1 and 1023. Recommended minimum of 50."
+ },
+ {
+ "name": "height",
+ "type": "integer",
+ "in": "query",
+ "required": true,
+ "minimum": 1,
+ "maximum": 1023,
+ "description": "Height of the thumbnail. It must be between 1 and 1023. Recommended minimum of 50."
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ },
+ {
+ "name": "smartCropping",
+ "type": "boolean",
+ "in": "query",
+ "required": false,
+ "default": false,
+ "description": "Boolean flag for enabling smart cropping."
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The generated thumbnail in binary format.",
+ "schema": {
+ "type": "file"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Generate Thumbnail request": {
+ "$ref": "./examples/SuccessfulGenerateThumbnailWithUrl.json"
+ }
+ }
+ }
+ },
+ "/ocr": {
+ "post": {
+ "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.",
+ "operationId": "RecognizePrintedText",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "$ref": "#/parameters/DetectOrientation"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ },
+ {
+ "$ref": "#/parameters/OcrLanguage"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. The textAngle property is the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present.
If the image contains text at different angles, only part of the text will be recognized correctly.",
+ "schema": {
+ "$ref": "#/definitions/OcrResult"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Ocr request": {
+ "$ref": "./examples/SuccessfulOcrWithUrl.json"
+ }
+ }
+ }
+ },
+ "/describe": {
+ "post": {
+ "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.",
+ "operationId": "DescribeImage",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "maxCandidates",
+ "in": "query",
+ "description": "Maximum number of candidate descriptions to be returned. The default is 1.",
+ "type": "string",
+ "required": false,
+ "default": "1"
+ },
+ {
+ "$ref": "#/parameters/ServiceLanguage"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Image description object.",
+ "schema": {
+ "$ref": "#/definitions/ImageDescription"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Describe request": {
+ "$ref": "./examples/SuccessfulDescribeWithUrl.json"
+ }
+ }
+ }
+ },
+ "/tag": {
+ "post": {
+ "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English.",
+ "operationId": "TagImage",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "$ref": "#/parameters/ServiceLanguage"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Image tags object.",
+ "schema": {
+ "$ref": "#/definitions/TagResult"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Tag request": {
+ "$ref": "./examples/SuccessfulTagWithUrl.json"
+ }
+ }
+ }
+ },
+ "/models/{model}/analyze": {
+ "post": {
+ "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.
A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.",
+ "operationId": "AnalyzeImageByDomain",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "model",
+ "in": "path",
+ "description": "The domain-specific content to recognize.",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "$ref": "#/parameters/ServiceLanguage"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Analysis result based on the domain model",
+ "schema": {
+ "$ref": "#/definitions/DomainModelResults"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Domain Model analysis request": {
+ "$ref": "./examples/SuccessfulDomainModelWithUrl.json"
+ }
+ }
+ }
+ },
+ "/recognizeText": {
+ "post": {
+ "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation.",
+ "operationId": "RecognizeText",
+ "parameters": [
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl"
+ },
+ {
+ "$ref": "#/parameters/TextRecognitionMode"
+ }
+ ],
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "responses": {
+ "202": {
+ "description": "The service has accepted the request and will start processing later. It will return Accepted immediately and include an Operation-Location header. The client should further query the operation status using the URL specified in this header. The operation ID will expire in 48 hours.",
+ "headers": {
+ "Operation-Location": {
+ "description": "URL to query for status of the operation. The operation ID will expire in 48 hours.",
+ "type": "string"
+ }
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Recognize Text request": {
+ "$ref": "./examples/SuccessfulRecognizeTextWithUrl.json"
+ }
+ }
+ }
+ },
+ "/textOperations/{operationId}": {
+ "get": {
+ "description": "This interface is used for getting the text operation result.
The URL to this interface should be retrieved from the 'Operation-Location' field returned by the Recognize Text interface.",
+ "operationId": "GetTextOperationResult",
+ "parameters": [
+ {
+ "name": "operationId",
+ "in": "path",
+ "description": "Id of the text operation returned in the response of the 'Recognize Text' interface.",
+ "required": true,
+ "type": "string"
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "responses": {
+ "200": {
+ "description": "Returns the operation status.",
+ "schema": {
+ "$ref": "#/definitions/TextOperationResult"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Get Text Operation Result request": {
+ "$ref": "./examples/SuccessfulGetTextOperationResult.json"
+ }
+ }
+ }
+ }
+ },
+ "x-ms-paths": {
+ "/analyze?overload=stream": {
+ "post": {
+ "description": "This operation extracts a rich set of visual features based on the image content.",
+ "operationId": "AnalyzeImageInStream",
+ "consumes": [
+ "application/octet-stream",
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "$ref": "#/parameters/VisualFeatures"
+ },
+ {
+ "name": "details",
+ "in": "query",
+ "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image.",
+ "type": "string",
+ "required": false,
+ "enum": [
+ "Celebrities",
+ "Landmarks"
+ ]
+ },
+ {
+ "$ref": "#/parameters/ServiceLanguage"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageStream"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The response includes the extracted features in JSON format. Here are the definitions for the enumeration types: ClipartType: Non-clipart = 0, ambiguous = 1, normal-clipart = 2, good-clipart = 3. LineDrawingType: Non-LineDrawing = 0, LineDrawing = 1.",
+ "schema": {
+ "$ref": "#/definitions/ImageAnalysis"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Analyze with Stream request": {
+ "$ref": "./examples/SuccessfulAnalyzeWithStream.json"
+ }
+ }
+ }
+ },
+ "/generateThumbnail?overload=stream": {
+ "post": {
+ "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.",
+ "operationId": "GenerateThumbnailInStream",
+ "consumes": [
+ "application/octet-stream",
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/octet-stream"
+ ],
+ "parameters": [
+ {
+ "name": "width",
+ "type": "integer",
+ "in": "query",
+ "required": true,
+ "minimum": 1,
+ "maximum": 1023,
+ "description": "Width of the thumbnail. It must be between 1 and 1023. Recommended minimum of 50."
+ },
+ {
+ "name": "height",
+ "type": "integer",
+ "in": "query",
+ "required": true,
+ "minimum": 1,
+ "maximum": 1023,
+ "description": "Height of the thumbnail. It must be between 1 and 1023. Recommended minimum of 50."
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageStream"
+ },
+ {
+ "name": "smartCropping",
+ "type": "boolean",
+ "in": "query",
+ "required": false,
+ "default": false,
+ "description": "Boolean flag for enabling smart cropping."
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The generated thumbnail in binary format.",
+ "schema": {
+ "type": "file"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Generate Thumbnail request": {
+ "$ref": "./examples/SuccessfulGenerateThumbnailWithStream.json"
+ }
+ }
+ }
+ },
+ "/ocr?overload=stream": {
+ "post": {
+ "description": "Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.",
+ "operationId": "RecognizePrintedTextInStream",
+ "consumes": [
+ "application/octet-stream",
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "$ref": "#/parameters/OcrLanguage"
+ },
+ {
+ "$ref": "#/parameters/DetectOrientation"
+ },
+ {
+ "$ref": "../../../Common/Parameters.json#/parameters/ImageStream"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. The textAngle property is the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.",
+ "schema": {
+ "$ref": "#/definitions/OcrResult"
+ }
+ },
+ "default": {
+ "description": "Error response.",
+ "schema": {
+ "$ref": "#/definitions/ComputerVisionError"
+ }
+ }
+ },
+ "x-ms-examples": {
+ "Successful Ocr request": {
+ "$ref": "./examples/SuccessfulOcrWithStream.json"
+ }
+ }
+ }
+ },
+ "/describe?overload=stream": {
+ "post": {
+ "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON.
If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DescribeImageInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "maxCandidates", + "in": "query", + "description": "Maximum number of candidate descriptions to be returned. The default is 1.", + "type": "string", + "required": false, + "default": "1" + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image description object.", + "schema": { + "$ref": "#/definitions/ImageDescription" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Describe request": { + "$ref": "./examples/SuccessfulDescribeWithStream.json" + } + } + } + }, + "/tag?overload=stream": { + "post": { + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English.", + "operationId": "TagImageInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image tags object.", + "schema": { + "$ref": "#/definitions/TagResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Tag request": { + "$ref": "./examples/SuccessfulTagWithStream.json" + } + } + } + }, + "/models/{model}/analyze?overload=stream": { + "post": { + "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageByDomainInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "model", + "in": "path", + "description": "The domain-specific content to recognize.", + "required": true, + "type": "string" + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Analysis result based on the domain model", + "schema": { + "$ref": "#/definitions/DomainModelResults" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulDomainModelWithStream.json" + } + } + } + }, + "/recognizeText?overload=stream": { + "post": { + "description": "Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation.", + "operationId": "RecognizeTextInStream", + "parameters": [ + { + "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + }, + { + "$ref": "#/parameters/TextRecognitionMode" + } + ], + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "responses": { + "202": { + "description": "The service has accepted the request and will start processing later.", + "headers": { + "Operation-Location": { + "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. 
", + "type": "string" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Domain Model analysis request": { + "$ref": "./examples/SuccessfulRecognizeTextWithStream.json" + } + } + } + } + }, + "definitions": { + "TextOperationResult": { + "type": "object", + "properties": { + "status": { + "type": "string", + "description": "Status of the text operation.", + "enum": [ + "Not Started", + "Running", + "Failed", + "Succeeded" + ], + "x-ms-enum": { + "name": "TextOperationStatusCodes", + "modelAsString": false + }, + "x-nullable": false + }, + "recognitionResult": { + "$ref": "#/definitions/RecognitionResult" + } + } + }, + "RecognitionResult": { + "type": "object", + "properties": { + "lines": { + "type": "array", + "items": { + "$ref": "#/definitions/Line" + } + } + } + }, + "Line": { + "type": "object", + "properties": { + "boundingBox": { + "$ref": "#/definitions/BoundingBox" + }, + "text": { + "type": "string" + }, + "words": { + "type": "array", + "items": { + "$ref": "#/definitions/Word" + } + } + } + }, + "Word": { + "type": "object", + "properties": { + "boundingBox": { + "$ref": "#/definitions/BoundingBox" + }, + "text": { + "type": "string" + } + } + }, + "BoundingBox": { + "type": "array", + "items": { + "type": "integer", + "x-nullable": false + } + }, + "ImageAnalysis": { + "type": "object", + "description": "Result of AnalyzeImage operation.", + "properties": { + "categories": { + "type": "array", + "description": "An array indicating identified categories.", + "items": { + "$ref": "#/definitions/Category" + } + }, + "adult": { + "$ref": "#/definitions/AdultInfo" + }, + "color": { + "$ref": "#/definitions/ColorInfo" + }, + "imageType": { + "$ref": "#/definitions/ImageType" + }, + "tags": { + "type": "array", + "description": "A list of tags with confidence level.", + "items": { + "$ref": "#/definitions/ImageTag" + } + }, + "description": { + "$ref": "#/definitions/ImageDescriptionDetails" + }, + "faces": { + "type": "array", + "description": "An array of possible faces within the image.", + "items": { + "$ref": "#/definitions/FaceDescription" + } + }, + "requestId": { + "type": "string", + "description": "Id of the request for tracking purposes." + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + } + }, + "OcrResult": { + "type": "object", + "properties": { + "language": { + "type": "string", + "description": "The BCP-47 language code of the text in the image." + }, + "textAngle": { + "type": "number", + "format": "double", + "description": "The angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly." + }, + "orientation": { + "type": "string", + "description": "Orientation of the text recognized in the image. 
The value (up, down, left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property)."
+ },
+ "regions": {
+ "type": "array",
+ "description": "An array of objects, where each object represents a region of recognized text.",
+ "items": {
+ "$ref": "#/definitions/OcrRegion"
+ }
+ }
+ }
+ },
+ "OcrRegion": {
+ "type": "object",
+ "description": "A region consists of multiple lines (e.g. a column of text in a multi-column document).",
+ "properties": {
+ "boundingBox": {
+ "type": "string",
+ "description": "Bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down."
+ },
+ "lines": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/OcrLine"
+ }
+ }
+ }
+ },
+ "OcrLine": {
+ "type": "object",
+ "description": "An object describing a single recognized line of text.",
+ "properties": {
+ "boundingBox": {
+ "type": "string",
+ "description": "Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down."
+ },
+ "words": {
+ "type": "array",
+ "description": "An array of objects, where each object represents a recognized word.",
+ "items": {
+ "$ref": "#/definitions/OcrWord"
+ }
+ }
+ }
+ },
+ "OcrWord": {
+ "type": "object",
+ "description": "Information on a recognized word.",
+ "properties": {
+ "boundingBox": {
+ "type": "string",
+ "description": "Bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down."
+ },
+ "text": {
+ "type": "string",
+ "description": "String value of a recognized word."
+ }
+ }
+ },
+ "ListModelsResult": {
+ "type": "object",
+ "description": "Result of the List Domain Models operation.",
+ "properties": {
+ "models": {
+ "type": "array",
+ "readOnly": true,
+ "description": "An array of supported models.",
+ "items": {
+ "$ref": "#/definitions/ModelDescription"
+ }
+ }
+ }
+ },
+ "ModelDescription": {
+ "type": "object",
+ "description": "An object describing a supported model by name and categories.",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "categories": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "DomainModelResults": {
+ "type": "object",
+ "description": "Result of image analysis using a specific domain model including additional metadata.",
+ "properties": {
+ "result": {
+ "x-ms-client-flatten": true,
+ "type": "object",
+ "description": "Model-specific response."
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Id of the REST API request."
+ },
+ "metadata": {
+ "$ref": "#/definitions/ImageMetadata"
+ }
+ }
+ },
+ "CelebrityResults": {
+ "type": "object",
+ "description": "List of celebrities recognized in the image.",
+ "properties": {
+ "celebrities": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/CelebritiesModel"
+ }
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Id of the REST API request."
+ },
+ "metadata": {
+ "$ref": "#/definitions/ImageMetadata"
+ }
+ }
+ },
+ "LandmarkResults": {
+ "type": "object",
+ "description": "List of landmarks recognized in the image.",
+ "properties": {
+ "landmarks": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/LandmarksModel"
+ }
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Id of the REST API request."
+ },
+ "metadata": {
+ "$ref": "#/definitions/ImageMetadata"
+ }
+ }
+ },
+ "ImageDescription": {
+ "type": "object",
+ "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.",
+ "properties": {
+ "description": {
+ "x-ms-client-flatten": true,
+ "$ref": "#/definitions/ImageDescriptionDetails"
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Id of the REST API request."
+ },
+ "metadata": {
+ "$ref": "#/definitions/ImageMetadata"
+ }
+ }
+ },
+ "TagResult": {
+ "type": "object",
+ "description": "The results of an image tag operation, including any tags and image metadata.",
+ "properties": {
+ "tags": {
+ "type": "array",
+ "description": "A list of tags with confidence level.",
+ "items": {
+ "$ref": "#/definitions/ImageTag"
+ }
+ },
+ "requestId": {
+ "type": "string",
+ "description": "Id of the REST API request."
+ },
+ "metadata": {
+ "$ref": "#/definitions/ImageMetadata"
+ }
+ }
+ },
+ "ImageDescriptionDetails": {
+ "type": "object",
+ "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.",
+ "properties": {
+ "tags": {
+ "type": "array",
+ "description": "A collection of image tags.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "captions": {
+ "type": "array",
+ "description": "A list of captions, sorted by confidence level.",
+ "items": {
+ "$ref": "#/definitions/ImageCaption"
+ }
+ }
+ }
+ },
+ "ImageCaption": {
+ "type": "object",
+ "description": "An image caption, i.e. a brief description of what the image depicts.",
+ "properties": {
+ "text": {
+ "type": "string",
+ "description": "The text of the caption"
+ },
+ "confidence": {
+ "type": "number",
+ "format": "double",
+ "description": "The level of confidence the service has in the caption"
+ }
+ }
+ },
+ "ImageTag": {
+ "type": "object",
+ "description": "An image tag, i.e. a word relevant to the content of the image.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The tag value"
+ },
+ "confidence": {
+ "type": "number",
+ "format": "double",
+ "description": "The level of confidence the service has in the tag"
+ }
+ }
+ },
+ "ImageMetadata": {
+ "type": "object",
+ "description": "Image metadata",
+ "properties": {
+ "width": {
+ "type": "integer",
+ "format": "int32",
+ "description": "Image width"
+ },
+ "height": {
+ "type": "integer",
+ "format": "int32",
+ "description": "Image height"
+ },
+ "format": {
+ "type": "string",
+ "description": "Image format"
+ }
+ }
+ },
+ "CelebritiesModel": {
+ "type": "object",
+ "description": "An object describing possible celebrity identification.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the celebrity."
+ },
+ "confidence": {
+ "type": "number",
+ "format": "double",
+ "description": "Level of confidence ranging from 0 to 1."
+ },
+ "faceRectangle": {
+ "$ref": "#/definitions/FaceRectangle"
+ }
+ }
+ },
+ "LandmarksModel": {
+ "type": "object",
+ "description": "A landmark recognized in the image.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the landmark."
+ },
+ "confidence": {
+ "type": "number",
+ "format": "double",
+ "description": "Confidence level for the landmark recognition."
+ }
+ }
+ },
+ "FaceRectangle": {
+ "type": "object",
+ "description": "An object describing a face rectangle.",
+ "properties": {
+ "left": {
+ "type": "integer",
+ "description": "X-coordinate of the top left point of the face."
+ },
+ "top": {
+ "type": "integer",
+ "description": "Y-coordinate of the top left point of the face."
+ },
+ "width": {
+ "type": "integer",
+ "description": "Width measured from the top-left point of the face."
+ },
+ "height": {
+ "type": "integer",
+ "description": "Height measured from the top-left point of the face."
+ }
+ }
+ },
+ "FaceDescription": {
+ "type": "object",
+ "description": "An object describing a face identified in the image.",
+ "properties": {
+ "age": {
+ "type": "integer",
+ "description": "Possible age of the face."
+ },
+ "gender": {
+ "type": "string",
+ "description": "Possible gender of the face.",
+ "x-ms-enum": {
+ "name": "Gender",
+ "modelAsString": false
+ },
+ "enum": [
+ "Male",
+ "Female"
+ ]
+ },
+ "faceRectangle": {
+ "$ref": "#/definitions/FaceRectangle"
+ }
+ }
+ },
+ "ImageType": {
+ "type": "object",
+ "description": "An object providing possible image types and matching confidence levels.",
+ "properties": {
+ "clipArtType": {
+ "type": "number",
+ "description": "Confidence level that the image is clip art."
+ },
+ "lineDrawingType": {
+ "type": "number",
+ "description": "Confidence level that the image is a line drawing."
+ }
+ }
+ },
+ "ColorInfo": {
+ "type": "object",
+ "description": "An object providing additional metadata describing color attributes.",
+ "properties": {
+ "dominantColorForeground": {
+ "type": "string",
+ "description": "Possible dominant foreground color."
+ },
+ "dominantColorBackground": {
+ "type": "string",
+ "description": "Possible dominant background color."
+ },
+ "dominantColors": {
+ "type": "array",
+ "description": "An array of possible dominant colors.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "accentColor": {
+ "type": "string",
+ "description": "Possible accent color."
+ },
+ "isBWImg": {
+ "type": "boolean",
+ "description": "A value indicating if the image is black and white."
+ }
+ }
+ },
+ "AdultInfo": {
+ "type": "object",
+ "description": "An object describing whether the image contains adult-oriented content and/or is racy.",
+ "properties": {
+ "isAdultContent": {
+ "type": "boolean",
+ "x-nullable": false,
+ "description": "A value indicating if the image contains adult-oriented content."
+ },
+ "isRacyContent": {
+ "type": "boolean",
+ "x-nullable": false,
+ "description": "A value indicating if the image is racy."
+ },
+ "adultScore": {
+ "type": "number",
+ "format": "double",
+ "x-nullable": false,
+ "description": "Score from 0 to 1 that indicates how much adult content is within the image."
+ },
+ "racyScore": {
+ "type": "number",
+ "format": "double",
+ "x-nullable": false,
+ "description": "Score from 0 to 1 that indicates how suggestive the image is."
+ }
+ }
+ },
+ "Category": {
+ "type": "object",
+ "description": "An object describing an identified category.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the category."
+ },
+ "score": {
+ "type": "number",
+ "format": "double",
+ "description": "Scoring of the category."
+ },
+ "detail": {
+ "$ref": "#/definitions/CategoryDetail"
+ }
+ }
+ },
+ "CategoryDetail": {
+ "type": "object",
+ "description": "An object describing additional category details.",
+ "properties": {
+ "celebrities": {
+ "type": "array",
+ "description": "An array of celebrities if any are identified.",
+ "items": {
+ "$ref": "#/definitions/CelebritiesModel"
+ }
+ },
+ "landmarks": {
+ "type": "array",
+ "description": "An array of landmarks if any are identified.",
+ "items": {
+ "$ref": "#/definitions/LandmarksModel"
+ }
+ }
+ }
+ },
+ "ComputerVisionError": {
+ "type": "object",
+ "required": [
+ "code",
+ "message"
+ ],
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The error code.",
+ "enum": [
+ "InvalidImageUrl",
+ "InvalidImageFormat",
+ "InvalidImageSize",
+ "NotSupportedVisualFeature",
+ "NotSupportedImage",
+ "InvalidDetails",
+ "NotSupportedLanguage",
+ "BadArgument",
+ "FailedToProcess",
+ "Timeout",
+ "InternalServerError",
+ "Unspecified",
+ "StorageException"
+ ],
+ "x-ms-enum": {
+ "name": "ComputerVisionErrorCodes",
+ "modelAsString": false
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "A message explaining the error reported by the service."
+ },
+ "requestId": {
+ "type": "string",
+ "description": "A unique request identifier."
+ }
+ }
+ },
+ "ServiceLanguage": {
+ "type": "string"
+ }
+ },
+ "parameters": {
+ "VisualFeatures": {
+ "name": "visualFeatures",
+ "in": "query",
+ "description": "A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include: Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generates coordinates, gender and age. ImageType - detects if the image is clip art or a line drawing. Color - determines the accent color, dominant color, and whether an image is black & white. Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected.",
+ "type": "array",
+ "x-ms-parameter-location": "method",
+ "required": false,
+ "collectionFormat": "csv",
+ "items": {
+ "type": "string",
+ "x-nullable": false,
+ "x-ms-enum": {
+ "name": "VisualFeatureTypes",
+ "modelAsString": false
+ },
+ "enum": [
+ "ImageType",
+ "Faces",
+ "Adult",
+ "Categories",
+ "Color",
+ "Tags",
+ "Description"
+ ]
+ }
+ },
+ "OcrLanguage": {
+ "name": "language",
+ "in": "query",
+ "description": "The BCP-47 language code of the text to be detected in the image.
The default value is 'unk'.",
+ "type": "string",
+ "required": false,
+ "x-ms-parameter-location": "method",
+ "x-nullable": false,
+ "x-ms-enum": {
+ "name": "OcrLanguages",
+ "modelAsString": false
+ },
+ "default": "unk",
+ "enum": [
+ "unk",
+ "zh-Hans",
+ "zh-Hant",
+ "cs",
+ "da",
+ "nl",
+ "en",
+ "fi",
+ "fr",
+ "de",
+ "el",
+ "hu",
+ "it",
+ "ja",
+ "ko",
+ "nb",
+ "pl",
+ "pt",
+ "ru",
+ "es",
+ "sv",
+ "tr",
+ "ar",
+ "ro",
+ "sr-Cyrl",
+ "sr-Latn",
+ "sk"
+ ]
+ },
+ "DetectOrientation": {
+ "name": "detectOrientation",
+ "in": "query",
+ "description": "Whether to detect the text orientation in the image. With detectOrientation=true, the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down).",
+ "required": true,
+ "x-ms-parameter-location": "method",
+ "type": "boolean",
+ "default": true
+ },
+ "TextRecognitionMode": {
+ "name": "mode",
+ "in": "query",
+ "description": "Type of text to recognize.",
+ "required": true,
+ "x-ms-parameter-location": "method",
+ "type": "string",
+ "x-ms-enum": {
+ "name": "TextRecognitionMode",
+ "modelAsString": false
+ },
+ "enum": [
+ "Handwritten",
+ "Printed"
+ ]
+ },
+ "ServiceLanguage": {
+ "name": "language",
+ "in": "query",
+ "description": "The desired language for output generation. If this parameter is not specified, the default value is \"en\". Supported languages: en - English (default), es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese.",
+ "type": "string",
+ "required": false,
+ "x-ms-parameter-location": "method",
+ "x-nullable": false,
+ "default": "en",
+ "enum": [
+ "en",
+ "es",
+ "ja",
+ "pt",
+ "zh"
+ ]
+ }
+ }
+}
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithStream.json
new file mode 100644
index 000000000000..562773f07549
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithStream.json
@@ -0,0 +1,118 @@
+{
+ "parameters": {
+ "AzureRegion": "Westus",
+ "Ocp-Apim-Subscription-Key": "{API key}",
+ "visualFeatures": ["Categories", "Adult", "Tags", "Description", "Faces", "Color", "ImageType"],
+ "details": "Celebrities",
+ "language": "en",
+ "Image": "{binary}"
+ },
+ "responses": {
+ "200": {
+ "headers": {},
+ "body": {
+ "categories": [
+ {
+ "name": "abstract_",
+ "score": 0.00390625
+ },
+ {
+ "name": "people_",
+ "score": 0.83984375,
+ "detail": {
+ "celebrities": [
+ {
+ "name": "Satya Nadella",
+ "faceRectangle": {
+ "left": 597,
+ "top": 162,
+ "width": 248,
+ "height": 248
+ },
+ "confidence": 0.999028444
+ }
+ ],
+ "landmarks": [
+ {
+ "name": "Forbidden City",
+ "confidence": 0.9978346
+ }
+ ]
+ }
+ }
+ ],
+ "adult": {
+ "isAdultContent": false,
+ "isRacyContent": false,
+ "adultScore": 0.0934349000453949,
+ "racyScore": 0.068613491952419281
+ },
+ "tags": [
+ {
+ "name": "person",
+ "confidence": 0.98979085683822632
+ },
+ {
+ "name": "man",
+ "confidence": 0.94493889808654785
+ },
+ {
+ "name": "outdoor",
+ "confidence": 0.938492476940155
+ },
+ {
+ "name": "window",
+ "confidence": 0.89513939619064331
+ }
+ ],
+ "description": {
+ "tags": [
+ "person",
+ "man",
+ "outdoor",
+ "window",
+ "glasses"
+ ],
+ "captions": [
+ {
+ "text": "Satya Nadella sitting on a bench",
+ "confidence": 0.48293603002174407
+ }
+ ]
+ },
+ "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d",
+ "metadata": {
+
"width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 + } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithUrl.json new file mode 100644 index 000000000000..37da3d805f70 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulAnalyzeWithUrl.json @@ -0,0 +1,124 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "visualFeatures": "Categories,Adult,Tags,Description,Faces,Color,ImageType", + "details": "Celebrities,Landmarks", + "language": "en", + "ImageUrl": "{url}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "categories": [ + { + "name": "abstract_", + "score": 0.00390625 + }, + { + "name": "people_", + "score": 0.83984375, + "detail": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + }, + { + "name": "building_", + "score": 0.984375, + "detail": { + "landmarks": [ + { + "name": "Forbidden City", + "confidence": 0.98290169239044189 + } + ] + } + } + ], + "adult": { + "isAdultContent": false, + "isRacyContent": false, + "adultScore": 0.0934349000453949, + "racyScore": 0.068613491952419281 + }, + "tags": [ + { + "name": "person", + "confidence": 0.98979085683822632 + }, + { + "name": "man", + "confidence": 0.94493889808654785 + }, + { + "name": "outdoor", + "confidence": 0.938492476940155 + }, + { + "name": "window", + "confidence": 0.89513939619064331 + } + ], + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + } + ] + }, + "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 + } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithStream.json new file mode 100644 index 000000000000..fce78185bcb8 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithStream.json @@ -0,0 +1,44 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "maxCandidates": "1", + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, 
+ "body": { + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + }, + { + "text": "Satya Nadella is sitting on a bench", + "confidence": 0.40037006815422832 + }, + { + "text": "Satya Nadella sitting in front of a building", + "confidence": 0.38035155997373377 + } + ] + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithUrl.json new file mode 100644 index 000000000000..07daa93f048f --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDescribeWithUrl.json @@ -0,0 +1,44 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "maxCandidates": "1", + "ImageUrl": "{url}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + }, + { + "text": "Satya Nadella is sitting on a bench", + "confidence": 0.40037006815422832 + }, + { + "text": "Satya Nadella sitting in front of a building", + "confidence": 0.38035155997373377 + } + ] + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithStream.json new file mode 100644 index 000000000000..153e36a977a0 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithStream.json @@ -0,0 +1,34 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "model": "Celebrities", + "Image": "{binary}" + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithUrl.json new file mode 100644 index 000000000000..d79f1db11a44 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulDomainModelWithUrl.json @@ -0,0 +1,34 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "model": "Celebrities", + "ImageUrl": "{url}" + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": 
"Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + } + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithStream.json new file mode 100644 index 000000000000..b8622ed4a62a --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithStream.json @@ -0,0 +1,16 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + "smartCropping": true, + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": "{binary}" + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithUrl.json new file mode 100644 index 000000000000..01bea96b8795 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGenerateThumbnailWithUrl.json @@ -0,0 +1,16 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + "smartCropping": true, + "ImageUrl": "{url}" + }, + "responses": { + "200": { + "headers": {}, + "body": "{Binary}" + } + } +} \ No newline at end of file diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json new file mode 100644 index 000000000000..bc061ddf0fb5 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json @@ -0,0 +1,272 @@ +{ + "parameters": { + "AzureRegion": "Westus", + "Ocp-Apim-Subscription-Key": "{API key}", + "operationId": "49a36324-fc4b-4387-aa06-090cfbf0064f" + }, + "responses": { + "200": { + "header": {}, + "body": { + "status": "Succeeded", + "recognitionResult": { + "lines": [ + { + "boundingBox": [ + 202, + 618, + 2047, + 643, + 2046, + 840, + 200, + 813 + ], + "text": "Our greatest glory is not", + "words": [ + { + "boundingBox": [ + 204, + 627, + 481, + 628, + 481, + 830, + 204, + 829 + ], + "text": "Our" + }, + { + "boundingBox": [ + 519, + 628, + 1057, + 630, + 1057, + 832, + 518, + 830 + ], + "text": "greatest" + }, + { + "boundingBox": [ + 1114, + 630, + 1549, + 631, + 1548, + 833, + 1114, + 832 + ], + "text": "glory" + }, + { + "boundingBox": [ + 1586, + 631, + 1785, + 632, + 1784, + 834, + 1586, + 833 + ], + "text": "is" + }, + { + "boundingBox": [ + 1822, + 632, + 2115, + 633, + 2115, + 835, + 1822, + 834 + ], + "text": "not" + } + ] + }, + { + "boundingBox": [ + 420, + 1273, + 2954, + 1250, + 2958, + 1488, + 422, + 1511 + ], + "text": "but in rising every time we fall", + "words": [ + { + "boundingBox": [ + 423, + 1269, + 634, + 1268, + 635, + 1507, + 424, + 1508 + ], + "text": "but" + }, + { + "boundingBox": [ + 667, + 1268, + 808, + 1268, + 809, + 1506, + 668, + 1507 + ], + "text": "in" + }, + { + "boundingBox": [ 
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json
new file mode 100644
index 000000000000..bc061ddf0fb5
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulGetTextOperationResult.json
@@ -0,0 +1,272 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "operationId": "49a36324-fc4b-4387-aa06-090cfbf0064f"
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "status": "Succeeded",
+        "recognitionResult": {
+          "lines": [
+            {
+              "boundingBox": [
+                202,
+                618,
+                2047,
+                643,
+                2046,
+                840,
+                200,
+                813
+              ],
+              "text": "Our greatest glory is not",
+              "words": [
+                {
+                  "boundingBox": [
+                    204,
+                    627,
+                    481,
+                    628,
+                    481,
+                    830,
+                    204,
+                    829
+                  ],
+                  "text": "Our"
+                },
+                {
+                  "boundingBox": [
+                    519,
+                    628,
+                    1057,
+                    630,
+                    1057,
+                    832,
+                    518,
+                    830
+                  ],
+                  "text": "greatest"
+                },
+                {
+                  "boundingBox": [
+                    1114,
+                    630,
+                    1549,
+                    631,
+                    1548,
+                    833,
+                    1114,
+                    832
+                  ],
+                  "text": "glory"
+                },
+                {
+                  "boundingBox": [
+                    1586,
+                    631,
+                    1785,
+                    632,
+                    1784,
+                    834,
+                    1586,
+                    833
+                  ],
+                  "text": "is"
+                },
+                {
+                  "boundingBox": [
+                    1822,
+                    632,
+                    2115,
+                    633,
+                    2115,
+                    835,
+                    1822,
+                    834
+                  ],
+                  "text": "not"
+                }
+              ]
+            },
+            {
+              "boundingBox": [
+                420,
+                1273,
+                2954,
+                1250,
+                2958,
+                1488,
+                422,
+                1511
+              ],
+              "text": "but in rising every time we fall",
+              "words": [
+                {
+                  "boundingBox": [
+                    423,
+                    1269,
+                    634,
+                    1268,
+                    635,
+                    1507,
+                    424,
+                    1508
+                  ],
+                  "text": "but"
+                },
+                {
+                  "boundingBox": [
+                    667,
+                    1268,
+                    808,
+                    1268,
+                    809,
+                    1506,
+                    668,
+                    1507
+                  ],
+                  "text": "in"
+                },
+                {
+                  "boundingBox": [
+                    874,
+                    1267,
+                    1289,
+                    1265,
+                    1290,
+                    1504,
+                    875,
+                    1506
+                  ],
+                  "text": "rising"
+                },
+                {
+                  "boundingBox": [
+                    1331,
+                    1265,
+                    1771,
+                    1263,
+                    1772,
+                    1502,
+                    1332,
+                    1504
+                  ],
+                  "text": "every"
+                },
+                {
+                  "boundingBox": [
+                    1812,
+                    1263,
+                    2178,
+                    1261,
+                    2179,
+                    1500,
+                    1813,
+                    1502
+                  ],
+                  "text": "time"
+                },
+                {
+                  "boundingBox": [
+                    2219,
+                    1261,
+                    2510,
+                    1260,
+                    2511,
+                    1498,
+                    2220,
+                    1500
+                  ],
+                  "text": "we"
+                },
+                {
+                  "boundingBox": [
+                    2551,
+                    1260,
+                    3016,
+                    1258,
+                    3017,
+                    1496,
+                    2552,
+                    1498
+                  ],
+                  "text": "fall"
+                }
+              ]
+            },
+            {
+              "boundingBox": [
+                1612,
+                903,
+                2744,
+                935,
+                2738,
+                1139,
+                1607,
+                1107
+              ],
+              "text": "in never failing ,",
+              "words": [
+                {
+                  "boundingBox": [
+                    1611,
+                    934,
+                    1707,
+                    933,
+                    1708,
+                    1147,
+                    1613,
+                    1147
+                  ],
+                  "text": "in"
+                },
+                {
+                  "boundingBox": [
+                    1753,
+                    933,
+                    2132,
+                    930,
+                    2133,
+                    1144,
+                    1754,
+                    1146
+                  ],
+                  "text": "never"
+                },
+                {
+                  "boundingBox": [
+                    2162,
+                    930,
+                    2673,
+                    927,
+                    2674,
+                    1140,
+                    2164,
+                    1144
+                  ],
+                  "text": "failing"
+                },
+                {
+                  "boundingBox": [
+                    2703,
+                    926,
+                    2788,
+                    926,
+                    2790,
+                    1139,
+                    2705,
+                    1140
+                  ],
+                  "text": ","
+                }
+              ]
+            }
+          ]
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulListDomainModels.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulListDomainModels.json
new file mode 100644
index 000000000000..9c43b772ed4d
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulListDomainModels.json
@@ -0,0 +1,27 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}"
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "models": [
+          {
+            "name": "celebrities",
+            "categories": [
+              "people_"
+            ]
+          },
+          {
+            "name": "landmarks",
+            "categories": [
+              "building_"
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithStream.json
new file mode 100644
index 000000000000..fa75fb8e5801
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithStream.json
@@ -0,0 +1,77 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "detectOrientation": "true",
+    "language": "en",
+    "Image": "{binary}"
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "language": "en",
+        "textAngle": -2.0000000000000338,
+        "orientation": "Up",
+        "regions": [
+          {
+            "boundingBox": "462,379,497,258",
+            "lines": [
+              {
+                "boundingBox": "462,379,497,74",
+                "words": [
+                  {
+                    "boundingBox": "462,379,41,73",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "523,379,153,73",
+                    "text": "GOAL"
+                  },
+                  {
+                    "boundingBox": "694,379,265,74",
+                    "text": "WITHOUT"
+                  }
+                ]
+              },
+              {
+                "boundingBox": "565,471,289,74",
+                "words": [
+                  {
+                    "boundingBox": "565,471,41,73",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "626,471,150,73",
+                    "text": "PLAN"
+                  },
+                  {
+                    "boundingBox": "801,472,53,73",
+                    "text": "IS"
+                  }
+                ]
+              },
+              {
+                "boundingBox": "519,563,375,74",
+                "words": [
+                  {
+                    "boundingBox": "519,563,149,74",
+                    "text": "JUST"
+                  },
+                  {
+                    "boundingBox": "683,564,41,72",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "741,564,153,73",
+                    "text": "WISH"
+                  }
+                ]
+              }
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
\ No newline at end of file
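Reviewer note: the OCR examples document a nested regions, lines, words response shape. A minimal Python sketch of calling the endpoint and flattening that shape back into text follows; the `/ocr` route and endpoint host are assumptions based on the v2.0 URL scheme, while the parameters and response keys mirror the example above.

```python
import requests

ENDPOINT = "https://westus.api.cognitive.microsoft.com/vision/v2.0"
KEY = "{API key}"  # placeholder, as in the examples

# detectOrientation and language are query parameters, as in the example inputs.
resp = requests.post(
    f"{ENDPOINT}/ocr",
    params={"detectOrientation": "true", "language": "en"},
    headers={"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"},
    json={"url": "{url}"},
)
resp.raise_for_status()

# Reassemble readable text from the regions -> lines -> words nesting.
for region in resp.json()["regions"]:
    for line in region["lines"]:
        print(" ".join(word["text"] for word in line["words"]))
```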
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithUrl.json
new file mode 100644
index 000000000000..f977123bfab8
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulOcrWithUrl.json
@@ -0,0 +1,77 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "detectOrientation": "true",
+    "language": "en",
+    "ImageUrl": "{url}"
+  },
+  "responses": {
+    "200": {
+      "headers": {},
+      "body": {
+        "language": "en",
+        "textAngle": -2.0000000000000338,
+        "orientation": "Up",
+        "regions": [
+          {
+            "boundingBox": "462,379,497,258",
+            "lines": [
+              {
+                "boundingBox": "462,379,497,74",
+                "words": [
+                  {
+                    "boundingBox": "462,379,41,73",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "523,379,153,73",
+                    "text": "GOAL"
+                  },
+                  {
+                    "boundingBox": "694,379,265,74",
+                    "text": "WITHOUT"
+                  }
+                ]
+              },
+              {
+                "boundingBox": "565,471,289,74",
+                "words": [
+                  {
+                    "boundingBox": "565,471,41,73",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "626,471,150,73",
+                    "text": "PLAN"
+                  },
+                  {
+                    "boundingBox": "801,472,53,73",
+                    "text": "IS"
+                  }
+                ]
+              },
+              {
+                "boundingBox": "519,563,375,74",
+                "words": [
+                  {
+                    "boundingBox": "519,563,149,74",
+                    "text": "JUST"
+                  },
+                  {
+                    "boundingBox": "683,564,41,72",
+                    "text": "A"
+                  },
+                  {
+                    "boundingBox": "741,564,153,73",
+                    "text": "WISH"
+                  }
+                ]
+              }
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithStream.json
new file mode 100644
index 000000000000..27c8173c458b
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithStream.json
@@ -0,0 +1,15 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "mode": "Handwritten",
+    "Image": "{binary}"
+  },
+  "responses": {
+    "202": {
+      "headers": {
+        "Operation-Location": "https://{domain}/vision/v2.0/textOperations/49a36324-fc4b-4387-aa06-090cfbf0064f"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithUrl.json
new file mode 100644
index 000000000000..98777b81141d
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulRecognizeTextWithUrl.json
@@ -0,0 +1,15 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "mode": "Handwritten",
+    "ImageUrl": "{url}"
+  },
+  "responses": {
+    "202": {
+      "headers": {
+        "Operation-Location": "https://westus.api.cognitive.microsoft.com/vision/v2.0/textOperations/49a36324-fc4b-4387-aa06-090cfbf0064f"
+      }
+    }
+  }
+}
\ No newline at end of file
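Reviewer note: taken together, the RecognizeText examples (202 plus an `Operation-Location` header) and SuccessfulGetTextOperationResult.json describe an asynchronous flow. Below is a minimal Python sketch of that flow; the `/recognizeText` route and the "Running"/"Not Started" in-progress statuses are assumptions, while the 202 header, the textOperations URL shape, and the "Succeeded" status come straight from these examples.

```python
import time

import requests

ENDPOINT = "https://westus.api.cognitive.microsoft.com/vision/v2.0"
KEY = "{API key}"  # placeholder, as in the examples

# Start recognition; per SuccessfulRecognizeTextWithUrl.json the service
# answers 202 with an Operation-Location header and no body.
start = requests.post(
    f"{ENDPOINT}/recognizeText",
    params={"mode": "Handwritten"},
    headers={"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"},
    json={"url": "{url}"},
)
start.raise_for_status()
operation_url = start.headers["Operation-Location"]

# Poll the textOperations endpoint until the operation leaves its in-progress
# states, as in SuccessfulGetTextOperationResult.json.
while True:
    result = requests.get(
        operation_url, headers={"Ocp-Apim-Subscription-Key": KEY}
    ).json()
    if result["status"] in ("Succeeded", "Failed"):
        break
    time.sleep(1)

if result["status"] == "Succeeded":
    for line in result["recognitionResult"]["lines"]:
        print(line["text"])
```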
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithStream.json
new file mode 100644
index 000000000000..f17d6e2ceb8a
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithStream.json
@@ -0,0 +1,53 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "Image": "{binary}"
+  },
+  "responses": {
+    "200": {
+      "body": {
+        "tags": [
+          {
+            "name": "grass",
+            "confidence": 0.9999997615814209
+          },
+          {
+            "name": "outdoor",
+            "confidence": 0.99997067451477051
+          },
+          {
+            "name": "sky",
+            "confidence": 0.99928975105285645
+          },
+          {
+            "name": "building",
+            "confidence": 0.99646323919296265
+          },
+          {
+            "name": "house",
+            "confidence": 0.99279803037643433
+          },
+          {
+            "name": "lawn",
+            "confidence": 0.82268029451370239
+          },
+          {
+            "name": "green",
+            "confidence": 0.64122253656387329
+          },
+          {
+            "name": "residential",
+            "confidence": 0.31403225660324097
+          }
+        ],
+        "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337",
+        "metadata": {
+          "width": 400,
+          "height": 400,
+          "format": "Jpeg"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithUrl.json
new file mode 100644
index 000000000000..7d018bc6697e
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v2.0/examples/SuccessfulTagWithUrl.json
@@ -0,0 +1,53 @@
+{
+  "parameters": {
+    "AzureRegion": "Westus",
+    "Ocp-Apim-Subscription-Key": "{API key}",
+    "ImageUrl": "{url}"
+  },
+  "responses": {
+    "200": {
+      "body": {
+        "tags": [
+          {
+            "name": "grass",
+            "confidence": 0.9999997615814209
+          },
+          {
+            "name": "outdoor",
+            "confidence": 0.99997067451477051
+          },
+          {
+            "name": "sky",
+            "confidence": 0.99928975105285645
+          },
+          {
+            "name": "building",
+            "confidence": 0.99646323919296265
+          },
+          {
+            "name": "house",
+            "confidence": 0.99279803037643433
+          },
+          {
+            "name": "lawn",
+            "confidence": 0.82268029451370239
+          },
+          {
+            "name": "green",
+            "confidence": 0.64122253656387329
+          },
+          {
+            "name": "residential",
+            "confidence": 0.31403225660324097
+          }
+        ],
+        "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337",
+        "metadata": {
+          "width": 400,
+          "height": 400,
+          "format": "Jpeg"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
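Reviewer note: the tag examples return a flat list of tags ordered by descending confidence. A minimal Python sketch of the call follows; the `/tag` route, endpoint host, and the 0.5 confidence cutoff are assumptions for illustration, while the response keys mirror the examples above.

```python
import requests

ENDPOINT = "https://westus.api.cognitive.microsoft.com/vision/v2.0"
KEY = "{API key}"  # placeholder, as in the examples

resp = requests.post(
    f"{ENDPOINT}/tag",
    headers={"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"},
    json={"url": "{url}"},
)
resp.raise_for_status()

# Keep only reasonably confident tags; the threshold is an arbitrary choice.
for tag in resp.json()["tags"]:
    if tag["confidence"] > 0.5:
        print(f'{tag["name"]}: {tag["confidence"]:.2f}')
```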