diff --git a/packages/vision/package.json b/packages/vision/package.json
index da46576a273..70010f9742b 100644
--- a/packages/vision/package.json
+++ b/packages/vision/package.json
@@ -55,11 +55,11 @@
"arrify": "^1.0.0",
"async": "^2.0.1",
"extend": "^3.0.0",
- "google-gax": "^0.12.0",
- "google-proto-files": "^0.9.1",
+ "google-gax": "^0.12.1",
+ "google-proto-files": "^0.10.0",
"is": "^3.0.1",
+ "prop-assign": "^1.0.0",
"propprop": "^0.3.0",
- "request": "^2.70.0",
"rgb-hex": "^1.0.0",
"string-format-obj": "^1.0.0"
},
diff --git a/packages/vision/src/index.js b/packages/vision/src/index.js
index ef58c1e7762..497ec39f412 100644
--- a/packages/vision/src/index.js
+++ b/packages/vision/src/index.js
@@ -29,9 +29,10 @@ var format = require('string-format-obj');
var fs = require('fs');
var is = require('is');
var prop = require('propprop');
-var request = require('request');
+var propAssign = require('prop-assign');
var rgbHex = require('rgb-hex');
-var util = require('util');
+
+var v1 = require('./v1');
var VERY_UNLIKELY = 0;
var UNLIKELY = 1;
@@ -59,20 +60,11 @@ function Vision(options) {
return new Vision(options);
}
- var config = {
- baseUrl: 'https://vision.googleapis.com/v1',
- projectIdRequired: false,
- scopes: [
- 'https://www.googleapis.com/auth/cloud-platform'
- ],
- packageJson: require('../package.json')
+ this.api = {
+ Vision: v1(options).imageAnnotatorClient(options)
};
-
- common.Service.call(this, config, options);
}
-util.inherits(Vision, common.Service);
-
Vision.likelihood = {
VERY_UNLIKELY: VERY_UNLIKELY,
UNLIKELY: UNLIKELY,
@@ -118,12 +110,8 @@ Vision.likelihood = {
* });
*/
Vision.prototype.annotate = function(requests, callback) {
- this.request({
- method: 'POST',
- uri: 'images:annotate',
- json: {
- requests: arrify(requests)
- }
+ this.api.Vision.batchAnnotateImages({
+ requests: arrify(requests)
}, function(err, resp) {
if (err) {
callback(err, null, resp);
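
Under the new wiring, `annotate` is a thin shim over the generated GAPIC client. For reference, a minimal sketch of driving that client directly, mirroring the constructor change above (the project ID and image URI are placeholders):

```js
var vision = require('@google-cloud/vision');

var options = { projectId: 'grape-spaceship-123' };
var client = vision.v1(options).imageAnnotatorClient(options);

client.batchAnnotateImages({
  requests: [{
    image: { source: { imageUri: 'gs://my-bucket/image.jpg' } },
    features: [{ type: 'LABEL_DETECTION' }]
  }]
}, function(err, response) {
  // response.responses holds one annotation object per request, in order.
});
```
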
@@ -170,8 +158,9 @@ Vision.prototype.annotate = function(requests, callback) {
* @param {number} options.maxResults - The maximum number of results, per type,
* to return in the response.
* @param {string[]} options.types - An array of feature types to detect from
- * the provided images. Acceptable values: `faces`, `landmarks`, `labels`,
- * `logos`, `properties`, `safeSearch`, `text`.
+ * the provided images. Acceptable values: `crops`, `document`, `faces`,
+ * `landmarks`, `labels`, `logos`, `properties`, `safeSearch`, `similar`,
+ * `text`.
* @param {boolean=} options.verbose - Use verbose mode, which returns a less-
* simplistic representation of the annotation (default: `false`).
* @param {function} callback - The callback function.
@@ -203,12 +192,9 @@ Vision.prototype.annotate = function(requests, callback) {
*
* //-
* // Run feature detection over a remote image.
- * //
- * // *Note: This is not an officially supported feature of the Vision API. Our
- * // library will make a request to the URL given, convert it to base64, and
- * // send that upstream.*
* //-
* var img = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png';
+ *
* vision.detect(img, types, function(err, detection, apiResponse) {});
*
* //-
@@ -308,6 +294,12 @@ Vision.prototype.detect = function(images, options, callback) {
var types = arrify(options.types);
var typeShortNameToFullName = {
+ crop: 'CROP_HINTS',
+ crops: 'CROP_HINTS',
+
+ doc: 'DOCUMENT_TEXT_DETECTION',
+ document: 'DOCUMENT_TEXT_DETECTION',
+
face: 'FACE_DETECTION',
faces: 'FACE_DETECTION',
@@ -324,10 +316,18 @@ Vision.prototype.detect = function(images, options, callback) {
safeSearch: 'SAFE_SEARCH_DETECTION',
+ similar: 'WEB_DETECTION',
+
text: 'TEXT_DETECTION'
};
var typeShortNameToRespName = {
+ crop: 'cropHintsAnnotation',
+ crops: 'cropHintsAnnotation',
+
+ doc: 'fullTextAnnotation',
+ document: 'fullTextAnnotation',
+
face: 'faceAnnotations',
faces: 'faceAnnotations',
@@ -344,17 +344,22 @@ Vision.prototype.detect = function(images, options, callback) {
safeSearch: 'safeSearchAnnotation',
+ similar: 'webDetection',
+
text: 'textAnnotations'
};
var typeRespNameToShortName = {
+ cropHintsAnnotation: 'crops',
faceAnnotations: 'faces',
+ fullTextAnnotation: 'document',
imagePropertiesAnnotation: 'properties',
labelAnnotations: 'labels',
landmarkAnnotations: 'landmarks',
logoAnnotations: 'logos',
safeSearchAnnotation: 'safeSearch',
- textAnnotations: 'text'
+ textAnnotations: 'text',
+ webDetection: 'similar'
};
Vision.findImages_(images, function(err, foundImages) {
@@ -375,9 +380,11 @@ Vision.prototype.detect = function(images, options, callback) {
var cfg = {
image: image,
- features: {
- type: typeName
- }
+ features: [
+ {
+ type: typeName
+ }
+ ]
};
if (is.object(options.imageContext)) {
@@ -385,7 +392,7 @@ Vision.prototype.detect = function(images, options, callback) {
}
if (is.number(options.maxResults)) {
- cfg.features.maxResults = options.maxResults;
+ cfg.features.map(propAssign('maxResults', options.maxResults));
}
config.push(cfg);
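
With `features` now an array, `prop-assign` replaces the old single-object mutation so every feature object receives `maxResults`. A rough sketch of the helper's behavior (an assumption about the module's internals; only the observable behavior below is relied on):

```js
// Rough equivalent of prop-assign: returns an iterator that sets a
// property on each element it receives.
function propAssign(prop, value) {
  return function(object) {
    object[prop] = value;
    return object;
  };
}

var features = [{ type: 'FACE_DETECTION' }, { type: 'LABEL_DETECTION' }];
features.map(propAssign('maxResults', 10));

// The map() return value is discarded in the code above; each feature
// object is mutated in place:
// [ { type: 'FACE_DETECTION', maxResults: 10 },
//   { type: 'LABEL_DETECTION', maxResults: 10 } ]
```
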
@@ -403,6 +410,7 @@ Vision.prototype.detect = function(images, options, callback) {
var detections = foundImages
.map(groupDetectionsByImage)
+ .map(removeExtraneousAnnotationObjects)
.map(assignTypeToEmptyAnnotations)
.map(removeDetectionsWithErrors)
.map(flattenAnnotations)
@@ -445,6 +453,37 @@ Vision.prototype.detect = function(images, options, callback) {
return annotations.splice(0, types.length);
}
+ function removeExtraneousAnnotationObjects(annotations) {
+ // The API response includes empty annotations for features that weren't
+ // requested.
+ //
+ // Before:
+ // [
+ // {
+ // faceAnnotations: {},
+ // labelAnnotations: {}
+ // }
+ // ]
+ //
+ // After:
+ // [
+ // {
+ // faceAnnotations: {}
+ // }
+ // ]
+ return annotations.map(function(annotation, index) {
+ var requestedAnnotationType = typeShortNameToRespName[types[index]];
+
+ for (var prop in annotation) {
+ if (prop !== requestedAnnotationType && prop !== 'error') {
+ delete annotation[prop];
+ }
+ }
+
+ return annotation;
+ });
+ }
+
function assignTypeToEmptyAnnotations(annotations) {
// Before:
// [
@@ -492,9 +531,7 @@ Vision.prototype.detect = function(images, options, callback) {
var errors = [];
annotations.forEach(function(annotation, index) {
- var annotationKey = Object.keys(annotation)[0];
-
- if (annotationKey === 'error') {
+ if (!is.empty(annotation.error)) {
var userInputType = types[index];
var respNameType = typeShortNameToRespName[userInputType];
annotation.error.type = typeRespNameToShortName[respNameType];
@@ -525,14 +562,17 @@ Vision.prototype.detect = function(images, options, callback) {
}
var formatMethodMap = {
- errors: Vision.formatError_,
+ cropHintsAnnotation: Vision.formatCropHintsAnnotation_,
+ error: Vision.formatError_,
faceAnnotations: Vision.formatFaceAnnotation_,
+ fullTextAnnotation: Vision.formatFullTextAnnotation_,
imagePropertiesAnnotation: Vision.formatImagePropertiesAnnotation_,
labelAnnotations: Vision.formatEntityAnnotation_,
landmarkAnnotations: Vision.formatEntityAnnotation_,
logoAnnotations: Vision.formatEntityAnnotation_,
safeSearchAnnotation: Vision.formatSafeSearchAnnotation_,
- textAnnotations: Vision.formatEntityAnnotation_
+ textAnnotations: Vision.formatEntityAnnotation_,
+ webDetection: Vision.formatWebDetection_
};
var formatMethod = formatMethodMap[type] || function(annotation) {
@@ -570,7 +610,7 @@ Vision.prototype.detect = function(images, options, callback) {
if (types.length === 1) {
// Only a single detection type was asked for, so no need to box in
// the results. Make them accessible without using a key.
- var key = Object.keys(annotations)[0];
+ var key = typeRespNameToShortName[typeShortNameToRespName[types[0]]];
annotations = annotations[key];
}
@@ -579,8 +619,94 @@ Vision.prototype.detect = function(images, options, callback) {
});
});
};
+
// jscs:enable maximumLineLength
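
With the type maps above in place, the new shorthands slot straight into `detect()` alongside the existing ones. A usage sketch (file name and project ID are placeholders):

```js
var vision = require('@google-cloud/vision')({ projectId: 'grape-spaceship-123' });

vision.detect('image.jpg', ['crops', 'document', 'similar'], function(err, detections) {
  // detections.crops    -> crop hint bounds for the image
  // detections.document -> the document's text as a single string
  // detections.similar  -> URLs of matching images found on the web
});
```
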
+/**
+ * Detect the crop hints within an image.
+ *
+ * Parameters
+ *
+ * See {module:vision#detect}.
+ *
+ * @resource [CropHintsAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#CropHintsAnnotation}
+ *
+ * @example
+ * vision.detectCrops('image.jpg', function(err, crops, apiResponse) {
+ * // crops = [
+ * // [
+ * // {
+ * // x: 1
+ * // },
+ * // {
+ * // x: 295
+ * // },
+ * // {
+ * // x: 295,
+ * // y: 301
+ * // },
+ * // {
+ * // x: 1,
+ * // y: 301
+ * // }
+ * // ],
+ * // // ...
+ * // ]
+ * });
+ *
+ * //-
+ * // Activate `verbose` mode for a more detailed response.
+ * //-
+ * var options = {
+ * verbose: true
+ * };
+ *
+ * vision.detectCrops('image.jpg', options, function(err, crops, apiResponse) {
+ * // crops = [
+ * // {
+ * // bounds: [
+ * // {
+ * // x: 1
+ * // },
+ * // {
+ * // x: 295
+ * // },
+ * // {
+ * // x: 295,
+ * // y: 301
+ * // },
+ * // {
+ * // x: 1,
+ * // y: 301
+ * // }
+ * // ],
+ * // confidence: 0.799999995
+ * // },
+ * // // ...
+ * // ]
+ * });
+ *
+ * //-
+ * // If the callback is omitted, we'll return a Promise.
+ * //-
+ * vision.detectCrops('image.jpg').then(function(data) {
+ * var crops = data[0];
+ * var apiResponse = data[1];
+ * });
+ */
+Vision.prototype.detectCrops = function(images, options, callback) {
+ if (is.fn(options)) {
+ callback = options;
+ options = {};
+ }
+
+ options = extend({}, options, {
+ types: ['crops']
+ });
+
+ this.detect(images, options, callback);
+};
+
/**
* Run face detection against an image.
*
@@ -1274,6 +1400,76 @@ Vision.prototype.detectSafeSearch = function(images, options, callback) {
this.detect(images, options, callback);
};
+/**
+ * Detect similar images from the internet.
+ *
+ * Parameters
+ *
+ * See {module:vision#detect}.
+ *
+ * @resource [WebAnnotation JSON representation]{@link https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#WebAnnotation}
+ *
+ * @example
+ * vision.detectSimilar('image.jpg', function(err, images, apiResponse) {
+ * // images = [
+ * // 'http://www.example.com/most-similar-image',
+ * // // ...
+ * // 'http://www.example.com/least-similar-image'
+ * // ]
+ * });
+ *
+ * //-
+ * // Activate `verbose` mode for a more detailed response.
+ * //-
+ * var opts = {
+ * verbose: true
+ * };
+ *
+ * vision.detectSimilar('image.jpg', opts, function(err, similar, apiResponse) {
+ * // similar = {
+ * // entities: [
+ * // 'Logo',
+ * // // ...
+ * // ],
+ * // fullMatches: [
+ * // 'http://www.example.com/most-similar-image',
+ * // // ...
+ * // 'http://www.example.com/least-similar-image'
+ * // ],
+ * // partialMatches: [
+ * // 'http://www.example.com/most-similar-image',
+ * // // ...
+ * // 'http://www.example.com/least-similar-image'
+ * // ],
+ * // pages: [
+ * // 'http://www.example.com/page-with-most-similar-image',
+ * // // ...
+ * // 'http://www.example.com/page-with-least-similar-image'
+ * // ]
+ * // }
+ * });
+ *
+ * //-
+ * // If the callback is omitted, we'll return a Promise.
+ * //-
+ * vision.detectSimilar('image.jpg').then(function(data) {
+ * var images = data[0];
+ * var apiResponse = data[1];
+ * });
+ */
+Vision.prototype.detectSimilar = function(images, options, callback) {
+ if (is.fn(options)) {
+ callback = options;
+ options = {};
+ }
+
+ options = extend({}, options, {
+ types: ['similar']
+ });
+
+ this.detect(images, options, callback);
+};
+
/**
* Detect the text within an image.
*
@@ -1301,20 +1497,20 @@ Vision.prototype.detectSafeSearch = function(images, options, callback) {
* // desc: 'This was text found in the image',
* // bounds: [
* // {
- * // x: 4,
- * // y: 5
+ * // x: 4,
+ * // y: 5
* // },
* // {
- * // x: 493,
- * // y: 5
+ * // x: 493,
+ * // y: 5
* // },
* // {
- * // x: 493,
- * // y: 89
+ * // x: 493,
+ * // y: 89
* // },
* // {
- * // x: 4,
- * // y: 89
+ * // x: 4,
+ * // y: 89
* // }
* // ]
* // }
@@ -1342,11 +1538,159 @@ Vision.prototype.detectText = function(images, options, callback) {
this.detect(images, options, callback);
};
+/**
+ * Annotate a document.
+ *
+ * Parameters
+ *
+ * See {module:vision#detect}.
+ *
+ * @resource [FullTextAnnotation JSON representation]{@link https://cloud.google.com/vision/reference/rest/v1/images/annotate#FullTextAnnotation}
+ *
+ * @example
+ * vision.readDocument('image.jpg', function(err, text, apiResponse) {
+ * // text = 'This paragraph was extracted from image.jpg';
+ * });
+ *
+ * //-
+ * // Activate `verbose` mode for a more detailed response.
+ * //-
+ * var opts = {
+ * verbose: true
+ * };
+ *
+ * vision.readDocument('image.jpg', opts, function(err, pages, apiResponse) {
+ * // pages = [
+ * // {
+ * // languages: [
+ * // 'en'
+ * // ],
+ * // width: 688,
+ * // height: 1096,
+ * // blocks: [
+ * // {
+ * // type: 'TEXT',
+ * // bounds: [
+ * // {
+ * // x: 4,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 89
+ * // },
+ * // {
+ * // x: 4,
+ * // y: 89
+ * // }
+ * // ],
+ * // paragraphs: [
+ * // {
+ * // bounds: [
+ * // {
+ * // x: 4,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 89
+ * // },
+ * // {
+ * // x: 4,
+ * // y: 89
+ * // }
+ * // ],
+ * // words: [
+ * // {
+ * // bounds: [
+ * // {
+ * // x: 4,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 89
+ * // },
+ * // {
+ * // x: 4,
+ * // y: 89
+ * // }
+ * // ],
+ * // symbols: [
+ * // {
+ * // bounds: [
+ * // {
+ * // x: 4,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 5
+ * // },
+ * // {
+ * // x: 493,
+ * // y: 89
+ * // },
+ * // {
+ * // x: 4,
+ * // y: 89
+ * // }
+ * // ],
+ * // text: 'T'
+ * // },
+ * // // ...
+ * // ]
+ * // },
+ * // // ...
+ * // ]
+ * // },
+ * // // ...
+ * // ]
+ * // },
+ * // // ...
+ * // ]
+ * // }
+ * // ]
+ * });
+ *
+ * //-
+ * // If the callback is omitted, we'll return a Promise.
+ * //-
+ * vision.readDocument('image.jpg').then(function(data) {
+ * var pages = data[0];
+ * var apiResponse = data[1];
+ * });
+ */
+Vision.prototype.readDocument = function(images, options, callback) {
+ if (is.fn(options)) {
+ callback = options;
+ options = {};
+ }
+
+ options = extend({}, options, {
+ types: ['document']
+ });
+
+ this.detect(images, options, callback);
+};
+
/**
* Determine the type of image the user is asking to be annotated. If a
* {module:storage/file}, convert to its "gs://{bucket}/{file}" URL. If a remote
- * URL, read the contents and convert to a base64 string. If a file path to a
- * local file, convert to a base64 string.
+ * URL, format as the API expects. If a file path to a local file, convert to a
+ * base64 string.
*
* @private
*/
@@ -1371,25 +1715,16 @@ Vision.findImages_ = function(images, callback) {
})
}
});
-
return;
}
// File is a URL.
if (/^http/.test(image)) {
- request({
- method: 'GET',
- uri: image,
- encoding: 'base64'
- }, function(err, resp, body) {
- if (err) {
- callback(err);
- return;
+ callback(null, {
+ source: {
+ imageUri: image
}
-
- callback(null, { content: body });
});
-
return;
}
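
With `request` gone, remote URLs are handed to the API untouched rather than downloaded and inlined. The two request shapes shown in this hunk, sketched with placeholder values:

```js
var fs = require('fs');

// A remote URL is passed through for the API to fetch itself:
var fromUrl = { source: { imageUri: 'https://example.com/image.jpg' } };

// A local file path is still read and sent inline as base64:
var fromPath = { content: fs.readFileSync('image.jpg').toString('base64') };
```
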
@@ -1407,6 +1742,22 @@ Vision.findImages_ = function(images, callback) {
async.mapLimit(images, MAX_PARALLEL_LIMIT, findImage, callback);
};
+/**
+ * Format a raw crop hint annotation response from the API.
+ *
+ * @private
+ */
+Vision.formatCropHintsAnnotation_ = function(cropHintsAnnotation, options) {
+ return cropHintsAnnotation.cropHints.map(function(cropHint) {
+ cropHint = {
+ bounds: cropHint.boundingPoly.vertices,
+ confidence: cropHint.confidence
+ };
+
+ return options.verbose ? cropHint : cropHint.bounds;
+ });
+};
+
/**
* Format a raw entity annotation response from the API.
*
@@ -1461,6 +1812,8 @@ Vision.formatError_ = function(err) {
err.code = httpError.code;
}
+ delete err.details;
+
return err;
};
@@ -1569,6 +1922,49 @@ Vision.formatFaceAnnotation_ = function(faceAnnotation) {
return formattedFaceAnnotation;
};
+/**
+ * Format a raw full text annotation response from the API.
+ *
+ * @private
+ */
+Vision.formatFullTextAnnotation_ = function(fullTextAnnotation, options) {
+ if (!options.verbose) {
+ return fullTextAnnotation.text;
+ }
+
+ return fullTextAnnotation.pages
+ .map(function(page) {
+ return {
+ languages: page.property.detectedLanguages.map(prop('languageCode')),
+ width: page.width,
+ height: page.height,
+ blocks: page.blocks.map(function(block) {
+ return {
+ type: block.blockType,
+ bounds: block.boundingBox && block.boundingBox.vertices || [],
+ paragraphs: arrify(block.paragraphs)
+ .map(function(paragraph) {
+ return {
+ bounds: paragraph.boundingBox.vertices,
+ words: paragraph.words.map(function(word) {
+ return {
+ bounds: word.boundingBox.vertices,
+ symbols: word.symbols.map(function(symbol) {
+ return {
+ bounds: symbol.boundingBox.vertices,
+ text: symbol.text
+ };
+ })
+ };
+ })
+ };
+ })
+ };
+ })
+ };
+ });
+};
+
/**
* Format a raw image properties annotation response from the API.
*
@@ -1624,6 +2020,41 @@ Vision.formatSafeSearchAnnotation_ = function(ssAnnotation, options) {
return ssAnnotation;
};
+/**
+ * Format a raw web detection response from the API.
+ *
+ * @private
+ */
+Vision.formatWebDetection_ = function(webDetection, options) {
+ function sortByScore(a, b) {
+ return a.score < b.score ? 1 : a.score > b.score ? -1 : 0;
+ }
+
+ var formattedWebDetection = {
+ entities: arrify(webDetection.webEntities).map(prop('description')),
+
+ fullMatches: arrify(webDetection.fullMatchingImages)
+ .sort(sortByScore)
+ .map(prop('url')),
+
+ partialMatches: arrify(webDetection.partialMatchingImages)
+ .sort(sortByScore)
+ .map(prop('url')),
+
+ pages: arrify(webDetection.pagesWithMatchingImages)
+ .sort(sortByScore)
+ .map(prop('url'))
+ };
+
+ if (!options.verbose) {
+ // Combine all matches.
+ formattedWebDetection = formattedWebDetection.fullMatches
+ .concat(formattedWebDetection.partialMatches);
+ }
+
+ return formattedWebDetection;
+};
+
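
`sortByScore` is a plain descending comparator, so the best matches come first in every list. A quick check of its behavior (URLs are placeholders):

```js
function sortByScore(a, b) {
  return a.score < b.score ? 1 : a.score > b.score ? -1 : 0;
}

var matches = [
  { score: 0.2, url: 'http://example.com/low' },
  { score: 0.9, url: 'http://example.com/high' }
];

console.log(matches.sort(sortByScore).map(function(m) { return m.url; }));
// => [ 'http://example.com/high', 'http://example.com/low' ]
```
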
/**
* Convert a "likelihood" value to a boolean representation, based on the lowest
* likelihood provided.
@@ -1649,4 +2080,4 @@ Vision.gteLikelihood_ = function(baseLikelihood, likelihood) {
common.util.promisifyAll(Vision);
module.exports = Vision;
-module.exports.v1 = require('./v1');
+module.exports.v1 = v1;
diff --git a/packages/vision/src/v1/image_annotator_client.js b/packages/vision/src/v1/image_annotator_client.js
index 515aa2949c8..c5ad6acdf5d 100644
--- a/packages/vision/src/v1/image_annotator_client.js
+++ b/packages/vision/src/v1/image_annotator_client.js
@@ -46,9 +46,9 @@ var ALL_SCOPES = [
];
/**
- * Service that performs Cloud Vision API detection tasks, such as face,
- * landmark, logo, label, and text detection, over client images, and returns
- * detected entities from the images.
+ * Service that performs Google Cloud Vision API detection tasks over client
+ * images, such as face, landmark, logo, label, and text detection. The
+ * ImageAnnotator service returns detected entities from the images.
*
 * This will be created through a builder function which can be obtained by the module.
 * See the following example of how to initialize the module and how to access the builder.
@@ -196,4 +196,4 @@ function ImageAnnotatorClientBuilder(gaxGrpc) {
}
module.exports = ImageAnnotatorClientBuilder;
module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS;
-module.exports.ALL_SCOPES = ALL_SCOPES;
\ No newline at end of file
+module.exports.ALL_SCOPES = ALL_SCOPES;
diff --git a/packages/vision/system-test/data/document.jpg b/packages/vision/system-test/data/document.jpg
new file mode 100644
index 00000000000..9152604fdde
Binary files /dev/null and b/packages/vision/system-test/data/document.jpg differ
diff --git a/packages/vision/system-test/vision.js b/packages/vision/system-test/vision.js
index b2e565a6309..aa495a798b8 100644
--- a/packages/vision/system-test/vision.js
+++ b/packages/vision/system-test/vision.js
@@ -19,7 +19,6 @@
var assert = require('assert');
var async = require('async');
var fs = require('fs');
-var http = require('http');
var is = require('is');
var multiline = require('multiline');
var normalizeNewline = require('normalize-newline');
@@ -32,6 +31,7 @@ var Vision = require('../');
describe('Vision', function() {
var IMAGES = {
+ document: path.join(__dirname, 'data/document.jpg'),
logo: path.join(__dirname, 'data/logo.jpg'),
rushmore: path.join(__dirname, 'data/rushmore.jpg'),
text: path.join(__dirname, 'data/text.png'),
@@ -82,25 +82,14 @@ describe('Vision', function() {
});
it('should detect from a URL', function(done) {
- var server = http.createServer(function(req, res) {
- fs.readFile(IMAGES.logo, function(err, resp) {
- assert.ifError(err);
- res.end(resp);
- });
- });
+ var url = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png';
- server.listen(8800, function(err) {
+ vision.detect(url, ['logos'], function(err, logos) {
assert.ifError(err);
- var url = 'http://localhost:8800/logo.png';
-
- vision.detect(url, ['logos'], function(err, logos) {
- assert.ifError(err);
-
- assert.deepEqual(logos, ['Google']);
+ assert.deepEqual(logos, ['Google']);
- done();
- });
+ done();
});
});
@@ -247,6 +236,59 @@ describe('Vision', function() {
});
});
+ describe('crops', function() {
+ it('should detect crops from an image', function(done) {
+ vision.detectCrops(IMAGES.logo, function(err, crops) {
+ assert.ifError(err);
+ assert.strictEqual(crops.length, 1);
+ assert.strictEqual(crops[0].length, 4);
+ done();
+ });
+ });
+
+ it('should detect crops from multiple images', function(done) {
+ vision.detectCrops([
+ IMAGES.logo,
+ IMAGES.rushmore
+ ], function(err, crops) {
+ assert.ifError(err);
+
+ assert.strictEqual(crops.length, 2);
+ assert.strictEqual(crops[0][0].length, 4);
+ assert.strictEqual(crops[1][0].length, 4);
+
+ done();
+ });
+ });
+ });
+
+ describe('documents', function() {
+ it('should detect text from a document', function(done) {
+ vision.readDocument(IMAGES.document, function(err, text) {
+ assert.ifError(err);
+
+ assert.strictEqual(typeof text, 'string');
+
+ done();
+ });
+ });
+
+ it('should detect pages from multiple documents', function(done) {
+ vision.readDocument([
+ IMAGES.document,
+ IMAGES.logo
+ ], function(err, pages) {
+ assert.ifError(err);
+
+ assert.strictEqual(pages.length, 2);
+      assert.strictEqual(typeof pages[0], 'string');
+      assert.strictEqual(typeof pages[1], 'string');
+
+ done();
+ });
+ });
+ });
+
describe('faces', function() {
it('should detect faces from an image', function(done) {
vision.detectFaces(IMAGES.rushmore, function(err, faces) {
@@ -508,6 +550,32 @@ describe('Vision', function() {
});
});
+ describe('similar', function() {
+ it('should detect similar images from the internet', function(done) {
+ vision.detectSimilar(IMAGES.logo, function(err, images) {
+ assert.ifError(err);
+ assert(images.length > -1);
+ done();
+ });
+ });
+
+ it('should detect similar images from multiple images', function(done) {
+ vision.detectSimilar([
+ IMAGES.logo,
+ IMAGES.rushmore
+ ], function(err, images) {
+ assert.ifError(err);
+
+ assert.strictEqual(images.length, 2);
+
+ assert(images[0].length > -1);
+ assert(images[1].length > -1);
+
+ done();
+ });
+ });
+ });
+
describe('text', function() {
var expectedResults = [
normalizeNewline(multiline.stripIndent(function() {/*
diff --git a/packages/vision/test/index.js b/packages/vision/test/index.js
index 2b5dfd3384e..4c0821b5220 100644
--- a/packages/vision/test/index.js
+++ b/packages/vision/test/index.js
@@ -22,10 +22,8 @@ var deepStrictEqual = require('deep-strict-equal');
var extend = require('extend');
var fs = require('fs');
var GrpcService = require('@google-cloud/common-grpc').Service;
-var nodeutil = require('util');
var prop = require('propprop');
var proxyquire = require('proxyquire');
-var Service = require('@google-cloud/common').Service;
var tmp = require('tmp');
var util = require('@google-cloud/common').util;
@@ -38,17 +36,16 @@ var fakeUtil = extend({}, util, {
}
});
-function FakeService() {
- this.calledWith_ = arguments;
- Service.apply(this, arguments);
-}
-
-nodeutil.inherits(FakeService, Service);
+var fakeV1Override;
+function fakeV1() {
+ if (fakeV1Override) {
+ return fakeV1Override.apply(null, arguments);
+ }
-var requestOverride = null;
-var fakeRequest = function() {
- return (requestOverride || util.noop).apply(this, arguments);
-};
+ return {
+ imageAnnotatorClient: util.noop
+ };
+}
describe('Vision', function() {
var IMAGE = './image.jpg';
@@ -58,29 +55,34 @@ describe('Vision', function() {
var VisionCached;
var vision;
+ var OPTIONS = {
+ projectId: PROJECT_ID
+ };
+
before(function() {
Vision = proxyquire('../', {
'@google-cloud/common': {
- Service: FakeService,
util: fakeUtil
},
- request: fakeRequest
+ './v1': fakeV1
});
VisionCached = extend({}, Vision);
});
beforeEach(function() {
- requestOverride = null;
+ fakeV1Override = null;
- extend(Vision, VisionCached);
+ vision = new Vision(OPTIONS);
- vision = new Vision({
- projectId: PROJECT_ID
- });
+ extend(Vision, VisionCached);
});
describe('instantiation', function() {
+ it('should promisify all the things', function() {
+ assert(promisified);
+ });
+
it('should normalize the arguments', function() {
var normalizeArguments = fakeUtil.normalizeArguments;
var normalizeArgumentsCalled = false;
@@ -100,22 +102,25 @@ describe('Vision', function() {
fakeUtil.normalizeArguments = normalizeArguments;
});
- it('should inherit from Service', function() {
- assert(vision instanceof Service);
+ it('should create a gax api client', function() {
+ var expectedVisionClient = {};
- var calledWith = vision.calledWith_[0];
+ fakeV1Override = function(options) {
+ assert.strictEqual(options, OPTIONS);
- var baseUrl = 'https://vision.googleapis.com/v1';
- assert.strictEqual(calledWith.baseUrl, baseUrl);
- assert.strictEqual(calledWith.projectIdRequired, false);
- assert.deepEqual(calledWith.scopes, [
- 'https://www.googleapis.com/auth/cloud-platform'
- ]);
- assert.deepEqual(calledWith.packageJson, require('../package.json'));
- });
+ return {
+ imageAnnotatorClient: function(options) {
+ assert.strictEqual(options, OPTIONS);
+ return expectedVisionClient;
+ }
+ };
+ };
- it('should promisify all the things', function() {
- assert(promisified);
+ var vision = new Vision(OPTIONS);
+
+ assert.deepEqual(vision.api, {
+ Vision: expectedVisionClient
+ });
});
});
@@ -133,35 +138,27 @@ describe('Vision', function() {
var REQ = {};
it('should arrify request objects', function(done) {
- vision.request = function(reqOpts) {
- assert.strictEqual(reqOpts.json.requests[0], REQ);
- done();
+ vision.api.Vision = {
+ batchAnnotateImages: function(reqOpts) {
+ assert.deepEqual(reqOpts, {
+ requests: [REQ]
+ });
+ done();
+ }
};
vision.annotate(REQ, assert.ifError);
});
- it('should make the correct API request', function(done) {
- var requests = [REQ, REQ];
-
- vision.request = function(reqOpts) {
- assert.strictEqual(reqOpts.method, 'POST');
- assert.strictEqual(reqOpts.uri, 'images:annotate');
- assert.strictEqual(reqOpts.json.requests, requests);
-
- done();
- };
-
- vision.annotate(requests, assert.ifError);
- });
-
describe('error', function() {
var error = new Error('Error.');
var apiResponse = {};
beforeEach(function() {
- vision.request = function(reqOpts, callback) {
- callback(error, apiResponse);
+ vision.api.Vision = {
+ batchAnnotateImages: function(reqOpts, callback) {
+ callback(error, apiResponse);
+ }
};
});
@@ -181,8 +178,10 @@ describe('Vision', function() {
};
beforeEach(function() {
- vision.request = function(reqOpts, callback) {
- callback(null, apiResponse);
+ vision.api.Vision = {
+ batchAnnotateImages: function(reqOpts, callback) {
+ callback(null, apiResponse);
+ }
};
});
@@ -200,12 +199,6 @@ describe('Vision', function() {
});
describe('detect', function() {
- var findImages_;
- var formatFaceAnnotation_;
- var formatImagePropertiesAnnotation_;
- var formatEntityAnnotation_;
- var formatSafeSearchAnnotation_;
-
var TYPES = [
'face',
'label'
@@ -222,14 +215,6 @@ describe('Vision', function() {
IMAGES[0]
];
- before(function() {
- findImages_ = Vision.findImages_;
- formatFaceAnnotation_ = Vision.formatFaceAnnotation_;
- formatImagePropertiesAnnotation_ =
- Vision.formatImagePropertiesAnnotation_;
- formatEntityAnnotation_ = Vision.formatEntityAnnotation_;
- formatSafeSearchAnnotation_ = Vision.formatSafeSearchAnnotation_;
- });
beforeEach(function() {
Vision.findImages_ = function(images, callback) {
@@ -237,15 +222,6 @@ describe('Vision', function() {
};
});
- after(function() {
- Vision.findImages_ = findImages_;
- Vision.formatFaceAnnotation_ = formatFaceAnnotation_;
- Vision.formatImagePropertiesAnnotation_ =
- formatImagePropertiesAnnotation_;
- Vision.formatEntityAnnotation_ = formatEntityAnnotation_;
- Vision.formatSafeSearchAnnotation_ = formatSafeSearchAnnotation_;
- });
-
it('should find the images', function(done) {
Vision.findImages_ = function(images) {
assert.strictEqual(images, IMAGE);
@@ -279,6 +255,12 @@ describe('Vision', function() {
it('should format the correct config', function(done) {
var typeShortNameToFullName = {
+ crop: 'CROP_HINTS',
+ crops: 'CROP_HINTS',
+
+ doc: 'DOCUMENT_TEXT_DETECTION',
+ document: 'DOCUMENT_TEXT_DETECTION',
+
face: 'FACE_DETECTION',
faces: 'FACE_DETECTION',
@@ -295,6 +277,8 @@ describe('Vision', function() {
safeSearch: 'SAFE_SEARCH_DETECTION',
+ similar: 'WEB_DETECTION',
+
text: 'TEXT_DETECTION'
};
@@ -305,9 +289,11 @@ describe('Vision', function() {
assert.deepEqual(config, [
{
image: IMAGES[0],
- features: {
- type: typeShortNameToFullName[shortName]
- }
+ features: [
+ {
+ type: typeShortNameToFullName[shortName]
+ }
+ ]
}
]);
@@ -338,9 +324,11 @@ describe('Vision', function() {
assert.deepEqual(config, [
{
image: IMAGES[0],
- features: {
- type: 'LABEL_DETECTION'
- },
+ features: [
+ {
+ type: 'LABEL_DETECTION'
+ }
+ ],
imageContext: imageContext
}
]);
@@ -361,10 +349,12 @@ describe('Vision', function() {
assert.deepEqual(config, [
{
image: IMAGES[0],
- features: {
- type: 'FACE_DETECTION',
- maxResults: 10
- }
+ features: [
+ {
+ type: 'FACE_DETECTION',
+ maxResults: 10
+ }
+ ]
}
]);
@@ -398,37 +388,57 @@ describe('Vision', function() {
it('should return the correct detections', function(done) {
var annotations = [
{
- faceAnnotations: {}
+ cropHintsAnnotation: { anno: true }
},
{
- imagePropertiesAnnotation: {}
+ faceAnnotations: { anno: true }
+ },
+ {
+ fullTextAnnotation: { anno: true }
+ },
+ {
+ imagePropertiesAnnotation: { anno: true }
},
{
- labelAnnotations: {}
+ labelAnnotations: { anno: true }
},
{
- landmarkAnnotations: {}
+ landmarkAnnotations: { anno: true }
},
{
- logoAnnotations: {}
+ logoAnnotations: { anno: true }
},
{
- safeSearchAnnotation: {}
+ safeSearchAnnotation: { anno: true }
},
{
- textAnnotations: {}
+ textAnnotations: { anno: true }
+ },
+ {
+ webDetection: { anno: true }
}
];
+ var cropHintsAnnotation = {};
var faceAnnotation = {};
+ var fullTextAnnotation = {};
var imagePropertiesAnnotation = {};
var entityAnnotation = {};
var safeSearchAnnotation = {};
+ var webDetection = {};
+
+ Vision.formatCropHintsAnnotation_ = function() {
+ return cropHintsAnnotation;
+ };
Vision.formatFaceAnnotation_ = function() {
return faceAnnotation;
};
+ Vision.formatFullTextAnnotation_ = function() {
+ return fullTextAnnotation;
+ };
+
Vision.formatImagePropertiesAnnotation_ = function() {
return imagePropertiesAnnotation;
};
@@ -441,27 +451,32 @@ describe('Vision', function() {
return safeSearchAnnotation;
};
+ Vision.formatWebDetection_ = function() {
+ return webDetection;
+ };
+
vision.annotate = function(config, callback) {
callback(null, annotations);
};
var expected = {
+ crops: cropHintsAnnotation,
faces: faceAnnotation,
+ document: fullTextAnnotation,
properties: imagePropertiesAnnotation,
labels: entityAnnotation,
landmarks: entityAnnotation,
logos: entityAnnotation,
safeSearch: safeSearchAnnotation,
- text: entityAnnotation
+ text: entityAnnotation,
+ similar: webDetection
};
var types = Object.keys(expected);
vision.detect(IMAGE, types, function(err, detections) {
assert.ifError(err);
-
assert(deepStrictEqual(detections, expected));
-
done();
});
});
@@ -495,8 +510,8 @@ describe('Vision', function() {
});
it('should return partial failure errors', function(done) {
- var error1 = {};
- var error2 = {};
+ var error1 = { error: true };
+ var error2 = { error: true };
var annotations = [
{ error: error1 },
@@ -540,10 +555,10 @@ describe('Vision', function() {
});
it('should return partial failure errors for multi images', function(done) {
- var error1 = {};
- var error2 = {};
- var error3 = {};
- var error4 = {};
+ var error1 = { error: true };
+ var error2 = { error: true };
+ var error3 = { error: true };
+ var error4 = { error: true };
var annotations = [
{ error: error1 },
@@ -802,6 +817,25 @@ describe('Vision', function() {
});
});
+ describe('detectCrops', function() {
+ it('should accept a callback only', function(done) {
+ vision.detect = testWithoutOptions('crops');
+
+ vision.detectCrops(IMAGE, done);
+ });
+
+ it('should accept options', function(done) {
+ var options = {
+ a: 'b',
+ c: 'd'
+ };
+
+ vision.detect = testWithOptions('crops', options);
+
+ vision.detectCrops(IMAGE, options, done);
+ });
+ });
+
describe('detectFaces', function() {
it('should accept a callback only', function(done) {
vision.detect = testWithoutOptions('faces');
@@ -916,6 +950,25 @@ describe('Vision', function() {
});
});
+ describe('detectSimilar', function() {
+ it('should accept a callback only', function(done) {
+ vision.detect = testWithoutOptions('similar');
+
+ vision.detectSimilar(IMAGE, done);
+ });
+
+ it('should accept options', function(done) {
+ var options = {
+ a: 'b',
+ c: 'd'
+ };
+
+ vision.detect = testWithOptions('similar', options);
+
+ vision.detectSimilar(IMAGE, options, done);
+ });
+ });
+
describe('detectText', function() {
it('should accept a callback only', function(done) {
vision.detect = testWithoutOptions('text');
@@ -935,6 +988,25 @@ describe('Vision', function() {
});
});
+ describe('readDocument', function() {
+ it('should accept a callback only', function(done) {
+ vision.detect = testWithoutOptions('document');
+
+ vision.readDocument(IMAGE, done);
+ });
+
+ it('should accept options', function(done) {
+ var options = {
+ a: 'b',
+ c: 'd'
+ };
+
+ vision.detect = testWithOptions('document', options);
+
+ vision.readDocument(IMAGE, options, done);
+ });
+ });
+
describe('findImages_', function() {
it('should convert a File object', function(done) {
var file = {
@@ -968,44 +1040,22 @@ describe('Vision', function() {
});
});
- it('should get a file from a URL', function(done) {
+ it('should properly format a URL', function(done) {
var imageUri = 'http://www.google.com/logo.png';
- var body = 'body';
-
- requestOverride = function(reqOpts, callback) {
- assert.strictEqual(reqOpts.method, 'GET');
- assert.strictEqual(reqOpts.uri, imageUri);
- assert.strictEqual(reqOpts.encoding, 'base64');
-
- callback(null, {}, body);
- };
Vision.findImages_(imageUri, function(err, images) {
assert.ifError(err);
assert.deepEqual(images, [
{
- content: body
+ source: {
+ imageUri: imageUri
+ }
}
]);
done();
});
});
- it('should return an error from reading a URL', function(done) {
- var imageUri = 'http://www.google.com/logo.png';
-
- var error = new Error('Error.');
-
- requestOverride = function(reqOpts, callback) {
- callback(error);
- };
-
- Vision.findImages_(imageUri, function(err) {
- assert.strictEqual(err, error);
- done();
- });
- });
-
it('should read from a file path', function(done) {
tmp.setGracefulCleanup();
@@ -1060,6 +1110,53 @@ describe('Vision', function() {
});
});
+ describe('formatCropHintsAnnotation_', function() {
+ var VERTICES = [
+ { x: 0, y: 0 },
+ { x: 0, y: 0 }
+ ];
+
+ var CONFIDENCE = 0.3;
+
+ var cropHintsAnnotation = {
+ cropHints: [
+ {
+ boundingPoly: {
+ vertices: VERTICES
+ },
+ confidence: CONFIDENCE
+ }
+ ]
+ };
+
+ describe('verbose: false', function() {
+ var opts = {};
+
+ it('should format the annotation', function() {
+ var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts);
+
+ assert.deepEqual(fmtd, [
+ VERTICES
+ ]);
+ });
+ });
+
+ describe('verbose: true', function() {
+ var opts = { verbose: true };
+
+ it('should format the annotation', function() {
+ var fmtd = Vision.formatCropHintsAnnotation_(cropHintsAnnotation, opts);
+
+ assert.deepEqual(fmtd, [
+ {
+ bounds: VERTICES,
+ confidence: CONFIDENCE
+ }
+ ]);
+ });
+ });
+ });
+
describe('formatEntityAnnotation_', function() {
var entityAnnotation = {
description: 'description',
@@ -1109,7 +1206,10 @@ describe('Vision', function() {
describe('formatError_', function() {
var error = {
code: 1,
- message: 'Oh no!'
+ message: 'Oh no!',
+ details: [
+ 'these should be clipped'
+ ]
};
it('should format an error', function() {
@@ -1123,174 +1223,170 @@ describe('Vision', function() {
});
describe('formatFaceAnnotation_', function() {
- var faceAnnotation;
+ var faceAnnotation = {
+ panAngle: {},
+ rollAngle: {},
+ tiltAngle: {},
+
+ boundingPoly: {
+ vertices: {}
+ },
+ fdBoundingPoly: {
+ vertices: {}
+ },
- before(function() {
- faceAnnotation = {
- panAngle: {},
- rollAngle: {},
- tiltAngle: {},
+ landmarkingConfidence: 0.2,
- boundingPoly: {
- vertices: {}
+ landmarks: [
+ {
+ type: 'CHIN_GNATHION',
+ position: {}
+ },
+ {
+ type: 'CHIN_LEFT_GONION',
+ position: {}
+ },
+ {
+ type: 'CHIN_RIGHT_GONION',
+ position: {}
+ },
+ {
+ type: 'LEFT_EAR_TRAGION',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EAR_TRAGION',
+ position: {}
},
- fdBoundingPoly: {
- vertices: {}
+ {
+ type: 'LEFT_OF_LEFT_EYEBROW',
+ position: {}
+ },
+ {
+ type: 'RIGHT_OF_LEFT_EYEBROW',
+ position: {}
},
+ {
+ type: 'LEFT_EYEBROW_UPPER_MIDPOINT',
+ position: {}
+ },
+ {
+ type: 'LEFT_OF_RIGHT_EYEBROW',
+ position: {}
+ },
+ {
+ type: 'RIGHT_OF_RIGHT_EYEBROW',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYEBROW_UPPER_MIDPOINT',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE_BOTTOM_BOUNDARY',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE_LEFT_CORNER',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE_PUPIL',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE_RIGHT_CORNER',
+ position: {}
+ },
+ {
+ type: 'LEFT_EYE_TOP_BOUNDARY',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE_BOTTOM_BOUNDARY',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE_LEFT_CORNER',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE_PUPIL',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE_RIGHT_CORNER',
+ position: {}
+ },
+ {
+ type: 'RIGHT_EYE_TOP_BOUNDARY',
+ position: {}
+ },
+ {
+ type: 'FOREHEAD_GLABELLA',
+ position: {}
+ },
+ {
+ type: 'LOWER_LIP',
+ position: {}
+ },
+ {
+ type: 'UPPER_LIP',
+ position: {}
+ },
+ {
+ type: 'MOUTH_CENTER',
+ position: {}
+ },
+ {
+ type: 'MOUTH_LEFT',
+ position: {}
+ },
+ {
+ type: 'MOUTH_RIGHT',
+ position: {}
+ },
+ {
+ type: 'NOSE_BOTTOM_CENTER',
+ position: {}
+ },
+ {
+ type: 'NOSE_BOTTOM_LEFT',
+ position: {}
+ },
+ {
+ type: 'NOSE_BOTTOM_RIGHT',
+ position: {}
+ },
+ {
+ type: 'NOSE_TIP',
+ position: {}
+ },
+ {
+ type: 'MIDPOINT_BETWEEN_EYES',
+ position: {}
+ }
+ ],
- landmarkingConfidence: 0.2,
+ detectionConfidence: 0.2,
+ blurredLikelihood: 'LIKELY',
+ underExposedLikelihood: 'LIKELY',
+ joyLikelihood: 'LIKELY',
+ headwearLikelihood: 'LIKELY',
+ angerLikelihood: 'LIKELY',
+ sorrowLikelihood: 'LIKELY',
+ surpriseLikelihood: 'LIKELY',
- landmarks: [
- {
- type: 'CHIN_GNATHION',
- position: {}
- },
- {
- type: 'CHIN_LEFT_GONION',
- position: {}
- },
- {
- type: 'CHIN_RIGHT_GONION',
- position: {}
- },
- {
- type: 'LEFT_EAR_TRAGION',
- position: {}
- },
- {
- type: 'RIGHT_EAR_TRAGION',
- position: {}
- },
- {
- type: 'LEFT_OF_LEFT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_OF_LEFT_EYEBROW',
- position: {}
- },
- {
- type: 'LEFT_EYEBROW_UPPER_MIDPOINT',
- position: {}
- },
- {
- type: 'LEFT_OF_RIGHT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_OF_RIGHT_EYEBROW',
- position: {}
- },
- {
- type: 'RIGHT_EYEBROW_UPPER_MIDPOINT',
- position: {}
- },
- {
- type: 'LEFT_EYE_BOTTOM_BOUNDARY',
- position: {}
- },
- {
- type: 'LEFT_EYE',
- position: {}
- },
- {
- type: 'LEFT_EYE_LEFT_CORNER',
- position: {}
- },
- {
- type: 'LEFT_EYE_PUPIL',
- position: {}
- },
- {
- type: 'LEFT_EYE_RIGHT_CORNER',
- position: {}
- },
- {
- type: 'LEFT_EYE_TOP_BOUNDARY',
- position: {}
- },
- {
- type: 'RIGHT_EYE_BOTTOM_BOUNDARY',
- position: {}
- },
- {
- type: 'RIGHT_EYE',
- position: {}
- },
- {
- type: 'RIGHT_EYE_LEFT_CORNER',
- position: {}
- },
- {
- type: 'RIGHT_EYE_PUPIL',
- position: {}
- },
- {
- type: 'RIGHT_EYE_RIGHT_CORNER',
- position: {}
- },
- {
- type: 'RIGHT_EYE_TOP_BOUNDARY',
- position: {}
- },
- {
- type: 'FOREHEAD_GLABELLA',
- position: {}
- },
- {
- type: 'LOWER_LIP',
- position: {}
- },
- {
- type: 'UPPER_LIP',
- position: {}
- },
- {
- type: 'MOUTH_CENTER',
- position: {}
- },
- {
- type: 'MOUTH_LEFT',
- position: {}
- },
- {
- type: 'MOUTH_RIGHT',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_CENTER',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_LEFT',
- position: {}
- },
- {
- type: 'NOSE_BOTTOM_RIGHT',
- position: {}
- },
- {
- type: 'NOSE_TIP',
- position: {}
- },
- {
- type: 'MIDPOINT_BETWEEN_EYES',
- position: {}
- }
- ],
-
- detectionConfidence: 0.2,
- blurredLikelihood: 'LIKELY',
- underExposedLikelihood: 'LIKELY',
- joyLikelihood: 'LIKELY',
- headwearLikelihood: 'LIKELY',
- angerLikelihood: 'LIKELY',
- sorrowLikelihood: 'LIKELY',
- surpriseLikelihood: 'LIKELY',
-
- nonExistentLikelihood: 'LIKELY'
- };
- });
+ nonExistentLikelihood: 'LIKELY'
+ };
function findLandmark(type) {
var landmarks = faceAnnotation.landmarks;
@@ -1395,6 +1491,127 @@ describe('Vision', function() {
});
});
+ describe('formatFullTextAnnotation_', function() {
+ var BLOCK_TYPE = 'block type';
+
+ var LANGUAGE_CODE = 'language code';
+
+ var TEXT = 'F';
+
+ var VERTICES = [
+ { x: 0, y: 0 },
+ { x: 0, y: 0 },
+ { x: 0, y: 0 },
+ { x: 0, y: 0 }
+ ];
+
+ var fullTextAnnotation = {
+ text: 'Full text',
+ pages: [
+ {
+ property: {
+ detectedLanguages: [
+ {
+ languageCode: LANGUAGE_CODE
+ }
+ ]
+ },
+ width: 50,
+ height: 100,
+ blocks: [
+ {
+ blockType: BLOCK_TYPE,
+ boundingBox: {
+ vertices: VERTICES
+ },
+ paragraphs: [
+ {
+ boundingBox: {
+ vertices: VERTICES
+ },
+ words: [
+ {
+ boundingBox: {
+ vertices: VERTICES
+ },
+ symbols: [
+ {
+ boundingBox: {
+ vertices: VERTICES
+ },
+ text: TEXT
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ };
+
+ describe('verbose: false', function() {
+ var opts = {};
+
+ it('should return text property', function() {
+ var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts);
+
+ assert.strictEqual(fmtd, fullTextAnnotation.text);
+ });
+ });
+
+ describe('verbose: true', function() {
+ var opts = { verbose: true };
+
+ it('should return formatted annotation', function() {
+ var fmtd = Vision.formatFullTextAnnotation_(fullTextAnnotation, opts);
+
+ assert.deepEqual(fmtd, [
+ {
+ languages: [
+ LANGUAGE_CODE
+ ],
+ width: 50,
+ height: 100,
+ blocks: [
+ {
+ type: BLOCK_TYPE,
+ bounds: VERTICES,
+ paragraphs: [
+ {
+ bounds: VERTICES,
+ words: [
+ {
+ bounds: VERTICES,
+ symbols: [
+ {
+ bounds: VERTICES,
+ text: TEXT
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]);
+ });
+
+ it('should not require a bounding block box', function() {
+ var annoWithoutBounding = extend(true, {}, fullTextAnnotation);
+ delete annoWithoutBounding.pages[0].blocks[0].boundingBox;
+
+ var fmtd = Vision.formatFullTextAnnotation_(annoWithoutBounding, opts);
+
+ assert.deepEqual(fmtd[0].blocks[0].bounds, []);
+ });
+ });
+ });
+
describe('formatImagePropertiesAnnotation_', function() {
var imgAnnotation = {
dominantColors: {
@@ -1449,16 +1666,12 @@ describe('Vision', function() {
});
describe('formatSafeSearchAnnotation_', function() {
- var safeSearchAnno;
-
- before(function() {
- safeSearchAnno = {
- adult: 'LIKELY',
- medical: 'LIKELY',
- spoof: 'LIKELY',
- violence: 'LIKELY'
- };
- });
+ var safeSearchAnno = {
+ adult: 'LIKELY',
+ medical: 'LIKELY',
+ spoof: 'LIKELY',
+ violence: 'LIKELY'
+ };
describe('verbose: false', function() {
var opts = {};
@@ -1488,6 +1701,90 @@ describe('Vision', function() {
});
});
+ describe('formatWebDetection_', function() {
+ var webDetection = {
+ webEntities: [
+ {
+ description: 'description'
+ },
+ ],
+
+ fullMatchingImages: [
+ {
+ score: 0,
+ url: 'http://full-0'
+ },
+ {
+ score: 1,
+ url: 'http://full-1'
+ }
+ ],
+
+ partialMatchingImages: [
+ {
+ score: 0,
+ url: 'http://partial-0'
+ },
+ {
+ score: 1,
+ url: 'http://partial-1'
+ }
+ ],
+
+ pagesWithMatchingImages: [
+ {
+ score: 0,
+ url: 'http://page-0'
+ },
+ {
+ score: 1,
+ url: 'http://page-1'
+ }
+ ]
+ };
+
+ describe('verbose: false', function() {
+ var opts = {};
+
+ it('should return sorted & combined image urls', function() {
+ var fmtd = Vision.formatWebDetection_(webDetection, opts);
+
+ assert.deepEqual(fmtd, [
+ 'http://full-1',
+ 'http://full-0',
+ 'http://partial-1',
+ 'http://partial-0'
+ ]);
+ });
+ });
+
+ describe('verbose: true', function() {
+ var opts = {
+ verbose: true
+ };
+
+ it('should return entities, pages & individual, sorted urls', function() {
+ var fmtd = Vision.formatWebDetection_(webDetection, opts);
+
+ assert.deepEqual(fmtd, {
+ entities: webDetection.webEntities.map(prop('description')),
+ fullMatches: [
+ 'http://full-1',
+ 'http://full-0'
+ ],
+ partialMatches: [
+ 'http://partial-1',
+ 'http://partial-0'
+ ],
+ pages: [
+ 'http://page-1',
+ 'http://page-0'
+ ]
+ });
+ });
+ });
+ });
+
describe('gteLikelihood_', function() {
it('should return booleans', function() {
var baseLikelihood = Vision.likelihood.LIKELY;