From 7ff0a5bc4a4fd8be8c49b0ed517bf00cb71fdafe Mon Sep 17 00:00:00 2001
From: Jesse Friedman
Date: Thu, 26 May 2016 16:51:43 -0400
Subject: [PATCH] vision: support ImageContext in detect requests (#1342)

* vision: support ImageContext in detect requests

ImageContext allows the user to provide "hints" about the content of the
image to the Vision API. See
https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageContext.

* Added name to authors/contributors

* Removed trailing whitespace

* vision: Alphabetizing imageContext docs and handler locations, simplifying imageContext docs

* vision: Adding test for imageContext

* vision: Fixing doc formatting for imageContext
---
 AUTHORS              |  1 +
 CONTRIBUTORS         |  1 +
 lib/vision/index.js  |  7 +++++++
 test/vision/index.js | 34 ++++++++++++++++++++++++++++++++++
 4 files changed, 43 insertions(+)

diff --git a/AUTHORS b/AUTHORS
index 7ac622c350e..8f7cda8d42d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -9,3 +9,4 @@
 Google Inc.
 Anand Suresh
 Brett Bergmann
+Jesse Friedman
\ No newline at end of file
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index a387c494c87..576f30215ab 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -17,6 +17,7 @@ Brett Bergmann
 Burcu Dogan
 Hector Rovira
 Ido Shamun
+Jesse Friedman
 Johan Euphrosine
 Marco Ziccardi
 Patrick Costello
diff --git a/lib/vision/index.js b/lib/vision/index.js
index dbc6f310386..0b04ee3d7b8 100644
--- a/lib/vision/index.js
+++ b/lib/vision/index.js
@@ -187,6 +187,9 @@ Vision.prototype.annotate = function(requests, callback) {
  *     image path, a remote image URL, or a gcloud File object.
  * @param {string[]|object=} options - An array of types or a configuration
  *     object.
+ * @param {object=} options.imageContext - See an
+ *     [`ImageContext`](https://cloud.google.com/vision/reference/rest/v1/images/annotate#ImageContext)
+ *     resource.
  * @param {number} options.maxResults - The maximum number of results, per type,
  *     to return in the response.
  * @param {string[]} options.types - An array of feature types to detect from
@@ -312,6 +315,10 @@ Vision.prototype.detect = function(images, options, callback) {
     }
   };
 
+  if (is.object(options.imageContext)) {
+    cfg.imageContext = options.imageContext;
+  }
+
   if (is.number(options.maxResults)) {
     cfg.features.maxResults = options.maxResults;
   }
diff --git a/test/vision/index.js b/test/vision/index.js
index db93183cf33..8b0bbd8ad0b 100644
--- a/test/vision/index.js
+++ b/test/vision/index.js
@@ -311,6 +311,40 @@ describe('Vision', function() {
       async.each(shortNames, checkConfig, done);
     });
 
+    it('should allow setting imageContext', function(done) {
+      var imageContext = {
+        latLongRect: {
+          minLatLng: {
+            latitude: 37.420901,
+            longitude: -122.081293
+          },
+          maxLatLng: {
+            latitude: 37.423228,
+            longitude: -122.086347
+          }
+        }
+      };
+
+      vision.annotate = function(config) {
+        assert.deepEqual(config, [
+          {
+            image: IMAGES[0],
+            features: {
+              type: 'LABEL_DETECTION'
+            },
+            imageContext: imageContext
+          }
+        ]);
+
+        done();
+      };
+
+      vision.detect(IMAGE, {
+        types: ['label'],
+        imageContext: imageContext
+      }, assert.ifError);
+    });
+
     it('should allow setting maxResults', function(done) {
       var maxResults = 10;
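
Usage note (not part of the patch): a minimal sketch of how a caller might pass imageContext to vision.detect once this lands, based on the option documented and tested above. The project settings and image path are placeholders, and the client construction assumes the gcloud package entry point of this era.

// Minimal usage sketch; the setup values below are placeholders.
var gcloud = require('gcloud');

var vision = gcloud.vision({
  projectId: 'my-project',        // placeholder
  keyFilename: './keyfile.json'   // placeholder
});

// Hint that the photo was taken inside a known lat/long bounding box,
// mirroring the latLongRect used in the new unit test.
var imageContext = {
  latLongRect: {
    minLatLng: { latitude: 37.420901, longitude: -122.081293 },
    maxLatLng: { latitude: 37.423228, longitude: -122.086347 }
  }
};

// detect(images, options, callback) per the JSDoc above; imageContext is
// forwarded on the annotate request alongside the requested feature types.
vision.detect('./image.jpg', {
  types: ['label'],
  imageContext: imageContext
}, function(err, detections) {
  if (err) {
    console.error(err);
    return;
  }
  console.log(detections);
});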