Add sample for Natural Language 1.1 launch (#352)
* Fix NL tests

* Add analyzeEntitySentiment sample (see the sketch after this list)

* Fix failing tests

* Add NL 1.0 features

* Update dependencies
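
The new entity-sentiment sample boils down to the call below — a minimal sketch distilled from the analyze.js diff further down, assuming the v1beta2 client and analyzeEntitySentiment method shown there (the sample text is only illustrative):

// Minimal sketch of the new entity-sentiment call (assumes the v1beta2 client used in the diff below)
const Language = require('@google-cloud/language').v1beta2();
const language = Language.languageServiceClient();

const request = {
  document: {
    type: 'PLAIN_TEXT',
    content: 'President Obama is speaking at the White House.' // illustrative text
  }
};

language.analyzeEntitySentiment(request)
  .then((results) => {
    // The first element of the resolved array carries the detected entities
    results[0].entities.forEach((entity) => {
      console.log(`${entity.name} (${entity.type}): score ${entity.sentiment.score}, magnitude ${entity.sentiment.magnitude}`);
    });
  })
  .catch((err) => console.error('ERROR:', err));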
Ace Nassri authored and jmdobry committed Apr 21, 2017
1 parent f8f7e62 commit e9c0f92
Showing 3 changed files with 179 additions and 25 deletions.
149 changes: 134 additions & 15 deletions cloud-language/snippets/analyze.js
@@ -21,7 +21,7 @@ function analyzeSentimentOfText (text) {
const Language = require('@google-cloud/language');

// Instantiates a client
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });

// The text to analyze, e.g. "Hello, world!"
// const text = 'Hello, world!';
@@ -32,9 +32,17 @@ function analyzeSentimentOfText (text) {
// Detects the sentiment of the document
document.detectSentiment()
.then((results) => {
const sentiment = results[0];
console.log(`Score: ${sentiment.score}`);
console.log(`Magnitude: ${sentiment.magnitude}`);
const sentiment = results[1].documentSentiment;
console.log(`Document sentiment:`);
console.log(` Score: ${sentiment.score}`);
console.log(` Magnitude: ${sentiment.magnitude}`);

const sentences = results[1].sentences;
sentences.forEach((sentence) => {
console.log(`Sentence: ${sentence.text.content}`);
console.log(` Score: ${sentence.sentiment.score}`);
console.log(` Magnitude: ${sentence.sentiment.magnitude}`);
});
})
.catch((err) => {
console.error('ERROR:', err);
@@ -49,7 +57,7 @@ function analyzeSentimentInFile (bucketName, fileName) {
const Storage = require('@google-cloud/storage');

// Instantiates the clients
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });
const storage = Storage();

// The name of the bucket where the file resides, e.g. "my-bucket"
@@ -67,9 +75,17 @@ function analyzeSentimentInFile (bucketName, fileName) {
// Detects the sentiment of the document
document.detectSentiment()
.then((results) => {
const sentiment = results[0];
console.log(`Score: ${sentiment.score}`);
console.log(`Magnitude: ${sentiment.magnitude}`);
const sentiment = results[1].documentSentiment;
console.log(`Document sentiment:`);
console.log(` Score: ${sentiment.score}`);
console.log(` Magnitude: ${sentiment.magnitude}`);

const sentences = results[1].sentences;
sentences.forEach((sentence) => {
console.log(`Sentence: ${sentence.text.content}`);
console.log(` Score: ${sentence.sentiment.score}`);
console.log(` Magnitude: ${sentence.sentiment.magnitude}`);
});
})
.catch((err) => {
console.error('ERROR:', err);
@@ -83,7 +99,7 @@ function analyzeEntitiesOfText (text) {
const Language = require('@google-cloud/language');

// Instantiates a client
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });

// The text to analyze, e.g. "Hello, world!"
// const text = 'Hello, world!';
@@ -94,12 +110,15 @@ function analyzeEntitiesOfText (text) {
// Detects entities in the document
document.detectEntities()
.then((results) => {
const entities = results[0];
const entities = results[1].entities;

console.log('Entities:');
entities.forEach((entity) => {
console.log(entity.name);
console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`);
if (entity.metadata && entity.metadata.wikipedia_url) {
console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}`);
}
});
})
.catch((err) => {
@@ -115,7 +134,7 @@ function analyzeEntitiesInFile (bucketName, fileName) {
const Storage = require('@google-cloud/storage');

// Instantiates the clients
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });
const storage = Storage();

// The name of the bucket where the file resides, e.g. "my-bucket"
@@ -139,6 +158,9 @@ function analyzeEntitiesInFile (bucketName, fileName) {
entities.forEach((entity) => {
console.log(entity.name);
console.log(` - Type: ${entity.type}, Salience: ${entity.salience}`);
if (entity.metadata && entity.metadata.wikipedia_url) {
console.log(` - Wikipedia URL: ${entity.metadata.wikipedia_url}`);
}
});
})
.catch((err) => {
@@ -153,7 +175,7 @@ function analyzeSyntaxOfText (text) {
const Language = require('@google-cloud/language');

// Instantiates a client
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });

// The text to analyze, e.g. "Hello, world!"
// const text = 'Hello, world!';
@@ -168,7 +190,8 @@ function analyzeSyntaxOfText (text) {

console.log('Parts of speech:');
syntax.forEach((part) => {
console.log(`${part.partOfSpeech.tag}:\t ${part.text.content}`);
console.log(`${part.partOfSpeech.tag}: ${part.text.content}`);
console.log(`Morphology:`, part.partOfSpeech);
});
})
.catch((err) => {
@@ -184,7 +207,7 @@ function analyzeSyntaxInFile (bucketName, fileName) {
const Storage = require('@google-cloud/storage');

// Instantiates the clients
const language = Language();
const language = Language({ apiVersion: 'v1beta2' });
const storage = Storage();

// The name of the bucket where the file resides, e.g. "my-bucket"
@@ -206,7 +229,8 @@ function analyzeSyntaxInFile (bucketName, fileName) {

console.log('Parts of speech:');
syntax.forEach((part) => {
console.log(`${part.partOfSpeech.tag}:\t ${part.text.content}`);
console.log(`${part.partOfSpeech.tag}: ${part.text.content}`);
console.log(`Morphology:`, part.partOfSpeech);
});
})
.catch((err) => {
@@ -215,6 +239,87 @@ function analyzeSyntaxInFile (bucketName, fileName) {
// [END language_syntax_file]
}

function analyzeEntitySentimentOfText (text) {
// [START language_entity_sentiment_string]
// Imports the Google Cloud client library
const Language = require('@google-cloud/language').v1beta2();

// Instantiates a client
const language = Language.languageServiceClient();

// The text to analyze, e.g. "Hello, world!"
// const text = 'Hello, world!';

// Configure a request containing a string
const request = {
document: {
type: 'PLAIN_TEXT',
content: text
}
};

// Detects sentiment of entities in the document
language.analyzeEntitySentiment(request)
.then((results) => {
const entities = results[0].entities;

console.log(`Entities and sentiments:`);
entities.forEach((entity) => {
console.log(` Name: ${entity.name}`);
console.log(` Type: ${entity.type}`);
console.log(` Score: ${entity.sentiment.score}`);
console.log(` Magnitude: ${entity.sentiment.magnitude}`);
});
})
.catch((err) => {
console.error('ERROR:', err);
});
// [END language_entity_sentiment_string]
}

function analyzeEntitySentimentInFile (bucketName, fileName) {
// [START language_entity_sentiment_file]
// Imports the Google Cloud client libraries
const Language = require('@google-cloud/language').v1beta2();
const Storage = require('@google-cloud/storage');

// Instantiates the clients
const language = Language.languageServiceClient();
const storage = Storage();

// The name of the bucket where the file resides, e.g. "my-bucket"
// const bucketName = 'my-bucket';

// The name of the file to analyze, e.g. "file.txt"
// const fileName = 'file.txt';

// Configure a request containing the file's GCS URI
const request = {
document: {
type: 'PLAIN_TEXT',
gcsContentUri: `gs://${bucketName}/${fileName}`
}
};

// Detects sentiment of entities in the document
language.analyzeEntitySentiment(request)
.then((results) => {
const entities = results[0].entities;

console.log(`Entities and sentiments:`);
entities.forEach((entity) => {
console.log(` Name: ${entity.name}`);
console.log(` Type: ${entity.type}`);
console.log(` Score: ${entity.sentiment.score}`);
console.log(` Magnitude: ${entity.sentiment.magnitude}`);
});
})
.catch((err) => {
console.error('ERROR:', err);
});
// [END language_entity_sentiment_file]
}

require(`yargs`)
.demand(1)
.command(
@@ -253,12 +358,26 @@ require(`yargs`)
{},
(opts) => analyzeSyntaxInFile(opts.bucketName, opts.fileName)
)
.command(
`entity-sentiment-text <text>`,
`Detects sentiment of the entities in a string.`,
{},
(opts) => analyzeEntitySentimentOfText(opts.text)
)
.command(
`entity-sentiment-file <bucketName> <fileName>`,
`Detects sentiment of the entities in a file in Google Cloud Storage.`,
{},
(opts) => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName)
)
.example(`node $0 sentiment-text "President Obama is speaking at the White House."`)
.example(`node $0 sentiment-file my-bucket file.txt`, `Detects sentiment in gs://my-bucket/file.txt`)
.example(`node $0 entities-text "President Obama is speaking at the White House."`)
.example(`node $0 entities-file my-bucket file.txt`, `Detects entities in gs://my-bucket/file.txt`)
.example(`node $0 syntax-text "President Obama is speaking at the White House."`)
.example(`node $0 syntax-file my-bucket file.txt`, `Detects syntax in gs://my-bucket/file.txt`)
.example(`node $0 entity-sentiment-text "President Obama is speaking at the White House."`)
.example(`node $0 entity-sentiment-file my-bucket file.txt`, `Detects sentiment of entities in gs://my-bucket/file.txt`)
.wrap(120)
.recommendCommands()
.epilogue(`For more information, see https://cloud.google.com/natural-language/docs`)
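
A note on the recurring results[1] change in the sentiment hunks above: assuming the 0.10.x client's document.detectSentiment() resolves with [sentiment, apiResponse] (the pattern these hunks rely on, not something stated in the commit itself), the document-level and per-sentence details live in the second element, which is why the samples now read results[1] instead of results[0]. A condensed, hypothetical view:

// Hypothetical condensed view of the change above (not part of the commit)
const Language = require('@google-cloud/language');
const language = Language({ apiVersion: 'v1beta2' });
const document = language.document('President Obama is speaking at the White House.');

document.detectSentiment()
  .then((results) => {
    const apiResponse = results[1]; // full AnalyzeSentiment response (assumed shape)
    console.log(apiResponse.documentSentiment.score); // document-level score
    apiResponse.sentences.forEach((sentence) => {
      console.log(sentence.text.content, sentence.sentiment.score); // per-sentence sentiment
    });
  })
  .catch((err) => console.error('ERROR:', err));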
6 changes: 3 additions & 3 deletions cloud-language/snippets/package.json
@@ -8,9 +8,9 @@
"test": "cd ..; npm run st -- --verbose language/system-test/*.test.js"
},
"dependencies": {
"@google-cloud/language": "0.10.2",
"@google-cloud/storage": "1.0.0",
"yargs": "7.0.2"
"@google-cloud/language": "0.10.3",
"@google-cloud/storage": "1.1.0",
"yargs": "7.1.0"
},
"engines": {
"node": ">=4.3.2"
49 changes: 42 additions & 7 deletions cloud-language/snippets/system-test/analyze.test.js
@@ -27,6 +27,7 @@ const bucketName = `nodejs-docs-samples-test-${uuid.v4()}`;
const fileName = `text.txt`;
const localFilePath = path.join(__dirname, `../resources/text.txt`);
const text = `President Obama is speaking at the White House.`;
const germanText = `Willkommen bei München`;

test.before(async () => {
const [bucket] = await storage.createBucket(bucketName);
@@ -43,16 +44,20 @@ test.after.always(async () => {
test.beforeEach(stubConsole);
test.afterEach.always(restoreConsole);

test(`should run sync recognize`, async (t) => {
test(`should analyze sentiment in text`, async (t) => {
const output = await runAsync(`${cmd} sentiment-text "${text}"`, cwd);
t.true(output.includes(`Score: 0.`));
t.true(output.includes(`Magnitude: 0.`));
t.true(output.includes(`Document sentiment:`));
t.true(output.includes(`Sentence: ${text}`));
t.true(output.includes(`Score: 0`));
t.true(output.includes(`Magnitude: 0`));
});

test(`should analyze sentiment in a file`, async (t) => {
const output = await runAsync(`${cmd} sentiment-file ${bucketName} ${fileName}`, cwd);
t.true(output.includes(`Score: 0.`));
t.true(output.includes(`Magnitude: 0.`));
t.true(output.includes(`Document sentiment:`));
t.true(output.includes(`Sentence: ${text}`));
t.true(output.includes(`Score: 0`));
t.true(output.includes(`Magnitude: 0`));
});

test(`should analyze entities in text`, async (t) => {
Expand All @@ -61,6 +66,7 @@ test(`should analyze entities in text`, async (t) => {
t.true(output.includes(`Type: PERSON`));
t.true(output.includes(`White House`));
t.true(output.includes(`Type: LOCATION`));
t.true(output.includes(`/wiki/Barack_Obama`));
});

test('should analyze entities in a file', async (t) => {
@@ -70,21 +76,50 @@ test('should analyze entities in a file', async (t) => {
t.true(output.includes(`Type: PERSON`));
t.true(output.includes(`White House`));
t.true(output.includes(`Type: LOCATION`));
t.true(output.includes(`/wiki/Barack_Obama`));
});

test(`should analyze syntax in text`, async (t) => {
const output = await runAsync(`${cmd} syntax-text "${text}"`, cwd);
t.true(output.includes(`Parts of speech:`));
t.true(output.includes(`NOUN:`));
t.true(output.includes(`President`));
t.true(output.includes(`NOUN:`));
t.true(output.includes(`Obama`));
t.true(output.includes(`Morphology:`));
t.true(output.includes(`tag: 'NOUN'`));
});

test('should analyze syntax in a file', async (t) => {
const output = await runAsync(`${cmd} syntax-file ${bucketName} ${fileName}`, cwd);
t.true(output.includes(`NOUN:`));
t.true(output.includes(`President`));
t.true(output.includes(`NOUN:`));
t.true(output.includes(`Obama`));
t.true(output.includes(`Morphology:`));
t.true(output.includes(`tag: 'NOUN'`));
});

test('should analyze syntax in a 1.1 language (German)', async (t) => {
const output = await runAsync(`${cmd} syntax-text "${germanText}"`, cwd);
t.true(output.includes(`Parts of speech:`));
t.true(output.includes(`ADV: Willkommen`));
t.true(output.includes(`ADP: bei`));
t.true(output.includes(`NOUN: München`));
});

test(`should analyze entity sentiment in text`, async (t) => {
const output = await runAsync(`${cmd} entity-sentiment-text "${text}"`, cwd);
t.true(output.includes(`Entities and sentiments:`));
t.true(output.includes(`Obama`));
t.true(output.includes(`PERSON`));
t.true(output.includes(`Score: 0`));
t.true(output.includes(`Magnitude: 0`));
});

test('should analyze entity sentiment in a file', async (t) => {
const output = await runAsync(`${cmd} entity-sentiment-file ${bucketName} ${fileName}`, cwd);
t.true(output.includes(`Entities and sentiments:`));
t.true(output.includes(`Obama`));
t.true(output.includes(`PERSON`));
t.true(output.includes(`Score: 0`));
t.true(output.includes(`Magnitude: 0`));
});
