-
Notifications
You must be signed in to change notification settings - Fork 2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
[NL] v1 and v1beta2 updates #470
Changes from 3 commits
b46c83d
c49beaf
5af566f
6b2c0e2
dcd005c
b498633
be879d1
0554498
27d7bca
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -241,6 +241,79 @@ function analyzeSyntaxInFile (bucketName, fileName) { | |
// [END language_syntax_file] | ||
} | ||
|
||
// Detects the sentiment associated with each entity found in a string
// and prints each entity's name, type, score, and magnitude.
function analyzeEntitySentimentOfText (text) {
  // [START language_entity_sentiment_string]
  // Imports the Google Cloud client library.
  // Uses the v1 client: analyzeEntitySentiment is available on the v1
  // surface, so there is no need to depend on v1beta2 here (per review).
  const language = require('@google-cloud/language').v1();

  // The text to analyze, e.g. "Hello, world!"
  // const text = 'Hello, world!';

  // Configure a request containing a string
  const request = {
    document: {
      type: 'PLAIN_TEXT',
      content: text
    }
  };

  // Detects sentiment of entities in the document
  language.analyzeEntitySentiment(request)
    .then((results) => {
      const entities = results[0].entities;

      console.log(`Entities and sentiments:`);
      entities.forEach((entity) => {
        console.log(`  Name: ${entity.name}`);
        console.log(`  Type: ${entity.type}`);
        console.log(`  Score: ${entity.sentiment.score}`);
        console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
      });
    })
    .catch((err) => {
      console.error('ERROR:', err);
    });
  // [END language_entity_sentiment_string]
}
|
||
// Detects the sentiment associated with each entity found in a text file
// stored in Google Cloud Storage, and prints each entity's name, type,
// score, and magnitude.
function analyzeEntitySentimentInFile (bucketName, fileName) {
  // [START language_entity_sentiment_file]
  // Imports the Google Cloud client library.
  // Uses the v1 client: analyzeEntitySentiment is available on the v1
  // surface, so there is no need to depend on v1beta2 here (per review).
  const language = require('@google-cloud/language').v1();

  // The name of the bucket where the file resides, e.g. "my-bucket"
  // const bucketName = 'my-bucket';

  // The name of the file to analyze, e.g. "file.txt"
  // const fileName = 'file.txt';

  // Configure a request referencing a file in Cloud Storage
  const request = {
    document: {
      type: 'PLAIN_TEXT',
      gcsContentUri: `gs://${bucketName}/${fileName}`
    }
  };

  // Detects sentiment of entities in the document
  language.analyzeEntitySentiment(request)
    .then((results) => {
      const entities = results[0].entities;

      console.log(`Entities and sentiments:`);
      entities.forEach((entity) => {
        console.log(`  Name: ${entity.name}`);
        console.log(`  Type: ${entity.type}`);
        console.log(`  Score: ${entity.sentiment.score}`);
        console.log(`  Magnitude: ${entity.sentiment.magnitude}`);
      });
    })
    .catch((err) => {
      console.error('ERROR:', err);
    });
  // [END language_entity_sentiment_file]
}
|
||
require(`yargs`) // eslint-disable-line | ||
.demand(1) | ||
.command( | ||
|
@@ -279,12 +352,26 @@ require(`yargs`) // eslint-disable-line | |
{}, | ||
(opts) => analyzeSyntaxInFile(opts.bucketName, opts.fileName) | ||
) | ||
.command( | ||
`entity-sentiment-text <text>`, | ||
`Detects sentiment of the entities in a string.`, | ||
{}, | ||
(opts) => analyzeEntitySentimentOfText(opts.text) | ||
) | ||
.command( | ||
`entity-sentiment-file <bucketName> <fileName>`, | ||
`Detects sentiment of the entities in a file in Google Cloud Storage.`, | ||
{}, | ||
(opts) => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) | ||
) | ||
.example(`node $0 sentiment-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 sentiment-file my-bucket file.txt`, `Detects sentiment in gs://my-bucket/file.txt`) | ||
.example(`node $0 entities-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 entities-file my-bucket file.txt`, `Detects entities in gs://my-bucket/file.txt`) | ||
.example(`node $0 syntax-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 syntax-file my-bucket file.txt`, `Detects syntax in gs://my-bucket/file.txt`) | ||
.example(`node $0 entity-sentiment-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 entity-sentiment-file my-bucket file.txt`, `Detects sentiment of entities in gs://my-bucket/file.txt`) | ||
.wrap(120) | ||
.recommendCommands() | ||
.epilogue(`For more information, see https://cloud.google.com/natural-language/docs`) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -97,7 +97,10 @@ function analyzeSentimentInFile (bucketName, fileName) { | |
function analyzeEntitiesOfText (text) { | ||
// [START language_entities_string] | ||
// Imports the Google Cloud client library | ||
const language = require('@google-cloud/language').v1beta2(); | ||
const Language = require('@google-cloud/language'); | ||
|
||
// Instantiates the clients | ||
const language = Language({ apiVersion: 'v1beta2' }); | ||
|
||
// The text to analyze, e.g. "Hello, world!" | ||
// const text = 'Hello, world!'; | ||
|
@@ -131,7 +134,10 @@ function analyzeEntitiesOfText (text) { | |
function analyzeEntitiesInFile (bucketName, fileName) { | ||
// [START language_entities_file] | ||
// Imports the Google Cloud client libraries | ||
const language = require('@google-cloud/language').v1beta2(); | ||
const Language = require('@google-cloud/language'); | ||
|
||
// Instantiates the clients | ||
const language = Language({ apiVersion: 'v1beta2' }); | ||
|
||
// The name of the bucket where the file resides, e.g. "my-bucket" | ||
// const bucketName = 'my-bucket'; | ||
|
@@ -168,7 +174,10 @@ function analyzeEntitiesInFile (bucketName, fileName) { | |
function analyzeSyntaxOfText (text) { | ||
// [START language_syntax_string] | ||
// Imports the Google Cloud client library | ||
const language = require('@google-cloud/language').v1beta2(); | ||
const Language = require('@google-cloud/language'); | ||
|
||
// Instantiates the clients | ||
const language = Language({ apiVersion: 'v1beta2' }); | ||
|
||
// The text to analyze, e.g. "Hello, world!" | ||
// const text = 'Hello, world!'; | ||
|
@@ -199,7 +208,10 @@ function analyzeSyntaxOfText (text) { | |
function analyzeSyntaxInFile (bucketName, fileName) { | ||
// [START language_syntax_file] | ||
// Imports the Google Cloud client libraries | ||
const language = require('@google-cloud/language').v1beta2(); | ||
const Language = require('@google-cloud/language'); | ||
|
||
// Instantiates the clients | ||
const language = Language({ apiVersion: 'v1beta2' }); | ||
|
||
// The name of the bucket where the file resides, e.g. "my-bucket" | ||
// const bucketName = 'my-bucket'; | ||
|
@@ -230,77 +242,73 @@ function analyzeSyntaxInFile (bucketName, fileName) { | |
// [END language_syntax_file] | ||
} | ||
|
||
// Classifies the content categories of a string and prints each category's
// name and confidence.
function classifyTextOfText (text) {
  // [START language_classify_string]
  // Imports the Google Cloud client library
  const Language = require('@google-cloud/language');

  // Creates a client.
  // classifyText is only available on the v1beta2 surface, so the generated
  // v1beta2 client is used here (per review discussion on this PR).
  const language = Language.v1beta2();

  // The text to classify. The classification API currently requires at
  // least 20 tokens to return non-empty results, so use a reasonably
  // long passage (per review), e.g.:
  // const text = 'Android is a mobile operating system developed by Google, ' +
  //   'based on the Linux kernel and designed primarily for touchscreen ' +
  //   'mobile devices such as smartphones and tablets.';

  // Instantiates a Document, representing the provided text
  const document = {
    content: text,
    type: 'PLAIN_TEXT'
  };

  // Classifies text in the document
  language.classifyText({ document: document })
    .then((results) => {
      const classification = results[0];

      console.log('Categories:');
      classification.categories.forEach((category) => {
        console.log(`Name: ${category.name}, Confidence: ${category.confidence}`);
      });
    })
    .catch((err) => {
      console.error('ERROR:', err);
    });
  // [END language_classify_string]
}
|
||
// Classifies the content categories of a text file stored in Google Cloud
// Storage and prints each category's name and confidence.
function classifyTextInFile (bucketName, fileName) {
  // [START language_classify_file]
  // Imports the Google Cloud client libraries
  const Language = require('@google-cloud/language');

  // Creates a client.
  // classifyText is only available on the v1beta2 surface, so the generated
  // v1beta2 client is used here (per review discussion on this PR).
  const language = Language.v1beta2();

  // The name of the bucket where the file resides, e.g. "my-bucket"
  // const bucketName = 'my-bucket';

  // The name of the file to classify, e.g. "android_text.txt".
  // NOTE: the classification API currently requires at least 20 tokens to
  // return non-empty results, so the file should contain a reasonably long
  // passage of text (per review).
  // const fileName = 'android_text.txt';

  // Instantiates a Document, representing a text file in Cloud Storage
  const document = {
    gcsContentUri: `gs://${bucketName}/${fileName}`,
    type: 'PLAIN_TEXT'
  };

  // Classifies text in the document
  language.classifyText({ document: document })
    .then((results) => {
      const classification = results[0];

      console.log('Categories:');
      classification.categories.forEach((category) => {
        console.log(`Name: ${category.name}, Confidence: ${category.confidence}`);
      });
    })
    .catch((err) => {
      console.error('ERROR:', err);
    });
  // [END language_classify_file]
}
|
||
require(`yargs`) // eslint-disable-line | ||
|
@@ -342,25 +350,25 @@ require(`yargs`) // eslint-disable-line | |
(opts) => analyzeSyntaxInFile(opts.bucketName, opts.fileName) | ||
) | ||
.command( | ||
`entity-sentiment-text <text>`, | ||
`Detects sentiment of the entities in a string.`, | ||
`classify-text <text>`, | ||
`Classifies text of a string.`, | ||
{}, | ||
(opts) => analyzeEntitySentimentOfText(opts.text) | ||
(opts) => classifyTextOfText(opts.text) | ||
) | ||
.command( | ||
`entity-sentiment-file <bucketName> <fileName>`, | ||
`Detects sentiment of the entities in a file in Google Cloud Storage.`, | ||
`classify-file <bucketName> <fileName>`, | ||
`Classifies text in a file in Google Cloud Storage.`, | ||
{}, | ||
(opts) => analyzeEntitySentimentInFile(opts.bucketName, opts.fileName) | ||
(opts) => classifyTextInFile(opts.bucketName, opts.fileName) | ||
) | ||
.example(`node $0 sentiment-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 sentiment-file my-bucket file.txt`, `Detects sentiment in gs://my-bucket/file.txt`) | ||
.example(`node $0 entities-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 entities-file my-bucket file.txt`, `Detects entities in gs://my-bucket/file.txt`) | ||
.example(`node $0 syntax-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 syntax-file my-bucket file.txt`, `Detects syntax in gs://my-bucket/file.txt`) | ||
.example(`node $0 entity-sentiment-text "President Obama is speaking at the White House."`) | ||
.example(`node $0 entity-sentiment-file my-bucket file.txt`, `Detects sentiment of entities in gs://my-bucket/file.txt`) | ||
.example(`node $0 classify-text "Android is a mobile operating system developed by Google."`) | ||
.example(`node $0 classify-file my-bucket android_text.txt`, `Detects syntax in gs://my-bucket/android_text.txt`) | ||
.wrap(120) | ||
.recommendCommands() | ||
.epilogue(`For more information, see https://cloud.google.com/natural-language/docs`) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This text is too short - currently the API requires 20 tokens to return non-empty results. This limitation might be improved in the future but let's use a longer piece of text for the sample just in case.