diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/Face.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/Face.json index 36a1bf92a85a..ee3b92700a83 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/Face.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/Face.json @@ -967,7 +967,7 @@ }, "/persongroups/{personGroupId}/persons/{personId}/persistedfaces": { "post": { - "description": "Add a face to a person into a large person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [LargePersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), [LargePersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) or [LargePersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.", + "description": "Add a face to a person into a person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [PersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523e), [PersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523d) or [PersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395245) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [PersonGroup Person - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "PersonGroupPerson_AddFaceFromUrl", "parameters": [ { @@ -984,6 +984,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -1015,7 +1018,7 @@ }, "/detect": { "post": { - "description": "Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.
\n* Optional parameters including faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise.\n* No image will be stored. Only the extracted face feature will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). It will expire 24 hours after the detection call.\n* Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* Faces are detectable when its size is 36x36 to 4096x4096 pixels. If need to detect very small but clear faces, please try to enlarge the input image.\n* Up to 64 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n* Face detector prefer frontal and near-frontal faces. There are cases that faces may not be detected, e.g. exceptionally large face angles (head-pose) or being occluded, or wrong image orientation.\n* Attributes (age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise) may not be perfectly accurate.\n* Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)", + "description": "Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.
\n* No image will be stored. Only the extracted face feature will be stored on the server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). The stored face feature(s) will expire and be deleted 24 hours after the original detection call.\n* Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results returned for specific attributes may not be highly accurate.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n* For optimal results when querying [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). Recommended for near-frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in May 2019 with improved accuracy especially on small, side and blurry faces. |\n\n* Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, or Find Similar are needed, please specify the recognition model with the 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01'. If the latest model is needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For more details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'recognition_01': | The default recognition model for [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). All faceIds created before March 2019 are bound to this recognition model. |\n | 'recognition_02': | Recognition model released in March 2019. 
'recognition_02' is recommended since its overall accuracy is improved compared with 'recognition_01'. |", "operationId": "Face_DetectWithUrl", "parameters": [ { @@ -1043,6 +1046,9 @@ }, { "$ref": "#/parameters/returnRecognitionModel" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -1074,7 +1080,7 @@ }, "/facelists/{faceListId}/persistedfaces": { "post": { - "description": "Add a face to a specified face list, up to 1,000 faces.\n
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [FaceList - Delete Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395251) or [FaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.", + "description": "Add a face to a specified face list, up to 1,000 faces.\n
To deal with an image that contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on the server until [FaceList - Delete Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395251) or [FaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [FaceList - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "FaceList_AddFaceFromUrl", "parameters": [ { @@ -1088,6 +1094,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "$ref": "#/parameters/detectionModel" } ], "produces": [ @@ -1682,7 +1691,7 @@ }, "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces": { "post": { - "description": "Add a face to a person into a large person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [LargePersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), [LargePersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) or [LargePersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.", + "description": "Add a face to a person into a large person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [LargePersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), [LargePersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) or [LargePersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [LargePersonGroup Person - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "LargePersonGroupPerson_AddFaceFromUrl", "parameters": [ { @@ -1699,6 +1708,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -2071,7 +2083,7 @@ }, "/largefacelists/{largeFaceListId}/persistedfaces": { "post": { - "description": "Add a face to a specified large face list, up to 1,000,000 faces.\n
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [LargeFaceList Face - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a158c8ad2de3616c086f2d4) or [LargeFaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a1580d5d2de3616c086f2cd) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.\n\nQuota:\n* Free-tier subscription quota: 1,000 faces per large face list.\n* S0-tier subscription quota: 1,000,000 faces per large face list.", + "description": "Add a face to a specified large face list, up to 1,000,000 faces.\n
To deal with an image that contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on the server until [LargeFaceList Face - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a158c8ad2de3616c086f2d4) or [LargeFaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a1580d5d2de3616c086f2cd) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [LargeFaceList - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |\n\nQuota:\n* Free-tier subscription quota: 1,000 faces per large face list.\n* S0-tier subscription quota: 1,000,000 faces per large face list.", "operationId": "LargeFaceList_AddFaceFromUrl", "parameters": [ { @@ -2085,6 +2097,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageUrl" + }, + { + "$ref": "#/parameters/detectionModel" } ], "produces": [ @@ -2480,7 +2495,7 @@ }, "/detect?overload=stream": { "post": { - "description": "Detect human faces in an image and returns face locations, and optionally with faceIds, landmarks, and attributes.", + "description": "Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.
\n* No image will be stored. Only the extracted face feature will be stored on the server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). The stored face feature(s) will expire and be deleted 24 hours after the original detection call.\n* Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results returned for specific attributes may not be highly accurate.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n* For optimal results when querying [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). Recommended for near-frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in May 2019 with improved accuracy especially on small, side and blurry faces. |\n\n* Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, or Find Similar are needed, please specify the recognition model with the 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01'. If the latest model is needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For more details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'recognition_01': | The default recognition model for [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). All faceIds created before March 2019 are bound to this recognition model. |\n | 'recognition_02': | Recognition model released in March 2019. 
'recognition_02' is recommended since its overall accuracy is improved compared with 'recognition_01'. |", "operationId": "Face_DetectWithStream", "parameters": [ { @@ -2508,6 +2523,9 @@ }, { "$ref": "#/parameters/returnRecognitionModel" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -2539,7 +2557,7 @@ }, "/persongroups/{personGroupId}/persons/{personId}/persistedfaces?overload=stream": { "post": { - "description": "Add a representative face to a person for identification. The input face is specified as an image with a targetFace rectangle.", + "description": "Add a face to a person into a person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [PersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523e), [PersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523d) or [PersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395245) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [PersonGroup Person - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "PersonGroupPerson_AddFaceFromStream", "parameters": [ { @@ -2556,6 +2574,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + }, + { + "$ref": "#/parameters/detectionModel" } ], "produces": [ @@ -2587,7 +2608,7 @@ }, "/facelists/{faceListId}/persistedfaces?overload=stream": { "post": { - "description": "Add a face to a face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire.", + "description": "Add a face to a specified face list, up to 1,000 faces.\n
To deal with an image that contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on the server until [FaceList - Delete Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395251) or [FaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [FaceList - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "FaceList_AddFaceFromStream", "parameters": [ { @@ -2601,6 +2622,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -2632,7 +2656,7 @@ }, "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces?overload=stream": { "post": { - "description": "Add a representative face to a person for identification. The input face is specified as an image with a targetFace rectangle.", + "description": "Add a face to a person into a large person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on server until [LargePersonGroup PersonFace - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), [LargePersonGroup Person - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) or [LargePersonGroup - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* Each person entry can hold up to 248 faces.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [LargePersonGroup Person - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |", "operationId": "LargePersonGroupPerson_AddFaceFromStream", "parameters": [ { @@ -2649,6 +2673,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + }, + { + "$ref": "#/parameters/detectionModel" } ], "produces": [ @@ -2680,7 +2707,7 @@ }, "/largefacelists/{largeFaceListId}/persistedfaces?overload=stream": { "post": { - "description": "Add a face to a large face list. The input face is specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face, and persistedFaceId will not expire.", + "description": "Add a face to a specified large face list, up to 1,000,000 faces.\n
To deal with an image that contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored on the server until [LargeFaceList Face - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a158c8ad2de3616c086f2d4) or [LargeFaceList - Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a1580d5d2de3616c086f2cd) is called.\n
Note persistedFaceId is different from faceId generated by [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236).\n* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n* \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from [Face - Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), there’s no guarantee to detect and add the face successfully.\n* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n* Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel.\n* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [How to specify a detection model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)\n | Model | Recommended use-case(s) |\n | ---------- | -------- |\n | 'detection_01': | The default detection model for [LargeFaceList - Add Face](/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3). Recommend for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image orientation, the faces in such cases may not be detected. |\n | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry faces. |\n\nQuota:\n* Free-tier subscription quota: 1,000 faces per large face list.\n* S0-tier subscription quota: 1,000,000 faces per large face list.", "operationId": "LargeFaceList_AddFaceFromStream", "parameters": [ { @@ -2694,6 +2721,9 @@ }, { "$ref": "../../../Common/Parameters.json#/parameters/ImageStream" + }, + { + "$ref": "#/parameters/detectionModel" } ], "consumes": [ @@ -4238,6 +4268,24 @@ "type": "boolean", "in": "query", "x-ms-parameter-location": "method" + }, + "detectionModel": { + "name": "detectionModel", + "description": "Name of detection model. Detection model is used to detect faces in the submitted image. A detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or (Large)PersonGroup - Add Face. 
The default value is 'detection_01'. If another model is needed, please explicitly specify it.", + "default": "detection_01", + "required": false, + "type": "string", + "in": "query", + "x-ms-parameter-location": "method", + "x-nullable": false, + "x-ms-enum": { + "name": "DetectionModel", + "modelAsString": true + }, + "enum": [ + "detection_01", + "detection_02" + ] } } } diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromStream.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromStream.json index 6484a9552ddd..ba03f3780650 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromStream.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromStream.json @@ -11,7 +11,8 @@ 100, 100 ], - "Image": "{Image stream in base 64 encoded format}" + "Image": "{Image stream in base 64 encoded format}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromUrl.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromUrl.json index e1592e3391fe..5cf854fce2cd 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromUrl.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddFaceListFaceFromUrl.json @@ -11,7 +11,8 @@ 100, 100 ], - "ImageUrl": "{Image url}" + "ImageUrl": "{Image url}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromStream.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromStream.json index 3f7b69fd4a6d..3435e5128732 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromStream.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromStream.json @@ -11,7 +11,8 @@ 100, 100 ], - "Image": "{Image stream in base 64 encoded format}" + "Image": "{Image stream in base 64 encoded format}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromUrl.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromUrl.json index 690b0b06429d..03b6f1a74f25 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromUrl.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargeFaceListFaceFromUrl.json @@ -11,7 +11,8 @@ 100, 100 ], - "ImageUrl": "{Image url}" + "ImageUrl": "{Image url}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromStream.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromStream.json index 2036caded53c..c6993a8a95bc 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromStream.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromStream.json @@ -12,7 +12,8 @@ 100, 100 ], - "Image": "{Image stream in base 64 
encoded format}" + "Image": "{Image stream in base 64 encoded format}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromUrl.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromUrl.json index b0105bc93194..70a604209ef7 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromUrl.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddLargePersonGroupPersonFaceFromUrl.json @@ -12,7 +12,8 @@ 100, 100 ], - "ImageUrl": "{Image url}" + "ImageUrl": "{Image url}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromStream.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromStream.json index b7029b513d46..95fb0ffefa69 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromStream.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromStream.json @@ -12,7 +12,8 @@ 100, 100 ], - "Image": "{Image stream in base 64 encoded format}" + "Image": "{Image stream in base 64 encoded format}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromUrl.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromUrl.json index 59646f8c8913..99c2af7df29d 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromUrl.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/AddPersonGroupPersonFaceFromUrl.json @@ -12,7 +12,8 @@ 100, 100 ], - "ImageUrl": "{Image url}" + "ImageUrl": "{Image url}", + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithStream.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithStream.json index 808376f9f6b7..3b532c60c099 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithStream.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithStream.json @@ -21,7 +21,8 @@ ], "Image": "{Image binary in base 64 format}", "recognitionModel": "recognition_01", - "returnRecognitionModel": true + "returnRecognitionModel": true, + "detectionModel": "detection_01" }, "responses": { "200": { diff --git a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithUrl.json b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithUrl.json index 5abedc09b881..3242cb9a4b7f 100644 --- a/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithUrl.json +++ b/specification/cognitiveservices/data-plane/Face/stable/v1.0/examples/DetectWithUrl.json @@ -21,7 +21,8 @@ ], "ImageUrl": "{Image Url here}", "recognitionModel": "recognition_01", - "returnRecognitionModel": true + "returnRecognitionModel": true, + "detectionModel": "detection_01" }, "responses": { "200": {