Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Language region tag update #1643

Merged
merged 10 commits
Aug 21, 2018
32 changes: 16 additions & 16 deletions in language/classify_text/classify_text_tutorial.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START classify_text_tutorial]
# [START language_classify_text_tutorial]
"""Using the classify_text method to find content categories of text files,
then use the content category labels to compare text similarity.

For more information, see the tutorial page at
https://cloud.google.com/natural-language/docs/classify-text-tutorial.
"""

# [START classify_text_tutorial_import]
# [START language_classify_text_tutorial_imports]
import argparse
import io
import json
Expand All @@ -30,10 +30,10 @@
from google.cloud import language
import numpy
import six
# [END classify_text_tutorial_import]
# [END language_classify_text_tutorial_imports]


# [START def_classify]
# [START language_classify_text_tutorial_classify]
def classify(text, verbose=True):
"""Classify the input text into categories. """

Expand Down Expand Up @@ -61,10 +61,10 @@ def classify(text, verbose=True):
print(u'{:<16}: {}'.format('confidence', category.confidence))

return result
# [END def_classify]
# [END language_classify_text_tutorial_classify]


# [START def_index]
# [START language_classify_text_tutorial_index]
def index(path, index_file):
"""Classify each text file in a directory and write
the results to the index_file.
Expand All @@ -91,10 +91,10 @@ def index(path, index_file):

print('Texts indexed in file: {}'.format(index_file))
return result
# [END def_index]
# [END language_classify_text_tutorial_index]


# [START def_split_labels]
# [START language_classify_text_tutorial_split_labels]
def split_labels(categories):
"""The category labels are of the form "/a/b/c" up to three levels,
for example "/Computers & Electronics/Software", and these labels
Expand All @@ -121,10 +121,10 @@ def split_labels(categories):
_categories[label] = confidence

return _categories
# [END def_split_labels]
# [END language_classify_text_tutorial_split_labels]


# [START def_similarity]
# [START language_classify_text_tutorial_similarity]
def similarity(categories1, categories2):
"""Cosine similarity of the categories treated as sparse vectors."""
categories1 = split_labels(categories1)
Expand All @@ -143,10 +143,10 @@ def similarity(categories1, categories2):
dot += confidence * categories2.get(label, 0.0)

return dot / (norm1 * norm2)
# [END def_similarity]
# [END language_classify_text_tutorial_similarity]


# [START def_query]
# [START language_classify_text_tutorial_query]
def query(index_file, text, n_top=3):
"""Find the indexed files that are the most similar to
the query text.
Expand Down Expand Up @@ -176,10 +176,10 @@ def query(index_file, text, n_top=3):
print('\n')

return similarities
# [END def_query]
# [END language_classify_text_tutorial_query]


# [START def_query_category]
# [START language_classify_text_tutorial_query_category]
def query_category(index_file, category_string, n_top=3):
"""Find the indexed files that are the most similar to
the query label.
Expand Down Expand Up @@ -211,7 +211,7 @@ def query_category(index_file, category_string, n_top=3):
print('\n')

return similarities
# [END def_query_category]
# [END language_classify_text_tutorial_query_category]


if __name__ == '__main__':
Expand Down Expand Up @@ -255,4 +255,4 @@ def query_category(index_file, category_string, n_top=3):
query(args.index_file, args.text)
if args.command == 'query-category':
query_category(args.index_file, args.category)
# [END classify_text_tutorial]
# [END language_classify_text_tutorial]
8 changes: 4 additions & 4 deletions in language/cloud-client/v1/quickstart.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,16 +18,16 @@
def run_quickstart():
# [START language_quickstart]
# Imports the Google Cloud client library
# [START migration_import]
# [START language_python_migration_imports]
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# [END migration_import]
# [END language_python_migration_imports]

# Instantiates a client
# [START migration_client]
# [START language_python_migration_client]
client = language.LanguageServiceClient()
# [END migration_client]
# [END language_python_migration_client]

# The text to analyze
text = u'Hello, world!'
Expand Down
58 changes: 30 additions & 28 deletions in language/cloud-client/v1/snippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
import six


# [START def_sentiment_text]
# [START language_sentiment_text]
def sentiment_text(text):
"""Detects sentiment in the text."""
client = language.LanguageServiceClient()
Expand All @@ -39,45 +39,45 @@ def sentiment_text(text):
text = text.decode('utf-8')

# Instantiates a plain text document.
# [START migration_document_text]
# [START migration_analyze_sentiment]
# [START language_python_migration_document_text]
# [START language_python_migration_sentiment_text]
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
# [END migration_document_text]
# [END language_python_migration_document_text]

# Detects sentiment in the document. You can also analyze HTML with:
# document.type == enums.Document.Type.HTML
sentiment = client.analyze_sentiment(document).document_sentiment

print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))
# [END migration_analyze_sentiment]
# [END def_sentiment_text]
# [END language_python_migration_sentiment_text]
# [END language_sentiment_text]


# [START def_sentiment_file]
# [START language_sentiment_gcs]
def sentiment_file(gcs_uri):
"""Detects sentiment in the file located in Google Cloud Storage."""
client = language.LanguageServiceClient()

# Instantiates a plain text document.
# [START migration_document_gcs_uri]
# [START language_python_migration_document_gcs]
document = types.Document(
gcs_content_uri=gcs_uri,
type=enums.Document.Type.PLAIN_TEXT)
# [END migration_document_gcs_uri]
# [END language_python_migration_document_gcs]

# Detects sentiment in the document. You can also analyze HTML with:
# document.type == enums.Document.Type.HTML
sentiment = client.analyze_sentiment(document).document_sentiment

print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))
# [END def_sentiment_file]
# [END language_sentiment_gcs]


# [START def_entities_text]
# [START language_entities_text]
def entities_text(text):
"""Detects entities in the text."""
client = language.LanguageServiceClient()
Expand All @@ -86,7 +86,7 @@ def entities_text(text):
text = text.decode('utf-8')

# Instantiates a plain text document.
# [START migration_analyze_entities]
# [START language_python_migration_entities_text]
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
Expand All @@ -107,11 +107,11 @@ def entities_text(text):
print(u'{:<16}: {}'.format('salience', entity.salience))
print(u'{:<16}: {}'.format('wikipedia_url',
entity.metadata.get('wikipedia_url', '-')))
# [END migration_analyze_entities]
# [END def_entities_text]
# [END language_python_migration_entities_text]
# [END language_entities_text]


# [START def_entities_file]
# [START language_entities_gcs]
def entities_file(gcs_uri):
"""Detects entities in the file located in Google Cloud Storage."""
client = language.LanguageServiceClient()
Expand All @@ -137,10 +137,10 @@ def entities_file(gcs_uri):
print(u'{:<16}: {}'.format('salience', entity.salience))
print(u'{:<16}: {}'.format('wikipedia_url',
entity.metadata.get('wikipedia_url', '-')))
# [END def_entities_file]
# [END language_entities_gcs]


# [START def_syntax_text]
# [START language_syntax_text]
def syntax_text(text):
"""Detects syntax in the text."""
client = language.LanguageServiceClient()
Expand All @@ -149,7 +149,7 @@ def syntax_text(text):
text = text.decode('utf-8')

# Instantiates a plain text document.
# [START migration_analyze_syntax]
# [START language_python_migration_syntax_text]
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
Expand All @@ -165,11 +165,11 @@ def syntax_text(text):
for token in tokens:
print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
token.text.content))
# [END migration_analyze_syntax]
# [END def_syntax_text]
# [END language_python_migration_syntax_text]
# [END language_syntax_text]


# [START def_syntax_file]
# [START language_syntax_gcs]
def syntax_file(gcs_uri):
"""Detects syntax in the file located in Google Cloud Storage."""
client = language.LanguageServiceClient()
Expand All @@ -190,10 +190,10 @@ def syntax_file(gcs_uri):
for token in tokens:
print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
token.text.content))
# [END def_syntax_file]
# [END language_syntax_gcs]


# [START def_entity_sentiment_text]
# [START language_entity_sentiment_text]
def entity_sentiment_text(text):
"""Detects entity sentiment in the provided text."""
client = language.LanguageServiceClient()
Expand Down Expand Up @@ -223,9 +223,10 @@ def entity_sentiment_text(text):
print(u' Type : {}'.format(mention.type))
print(u'Salience: {}'.format(entity.salience))
print(u'Sentiment: {}\n'.format(entity.sentiment))
# [END def_entity_sentiment_text]
# [END language_entity_sentiment_text]


# [START language_entity_sentiment_gcs]
def entity_sentiment_file(gcs_uri):
"""Detects entity sentiment in a Google Cloud Storage file."""
client = language.LanguageServiceClient()
Expand All @@ -251,9 +252,10 @@ def entity_sentiment_file(gcs_uri):
print(u' Type : {}'.format(mention.type))
print(u'Salience: {}'.format(entity.salience))
print(u'Sentiment: {}\n'.format(entity.sentiment))
# [END language_entity_sentiment_gcs]


# [START def_classify_text]
# [START language_classify_text]
def classify_text(text):
"""Classifies content categories of the provided text."""
client = language.LanguageServiceClient()
Expand All @@ -271,10 +273,10 @@ def classify_text(text):
print(u'=' * 20)
print(u'{:<16}: {}'.format('name', category.name))
print(u'{:<16}: {}'.format('confidence', category.confidence))
# [END def_classify_text]
# [END language_classify_text]


# [START def_classify_file]
# [START language_classify_gcs]
def classify_file(gcs_uri):
"""Classifies content categories of the text in a Google Cloud Storage
file.
Expand All @@ -291,7 +293,7 @@ def classify_file(gcs_uri):
print(u'=' * 20)
print(u'{:<16}: {}'.format('name', category.name))
print(u'{:<16}: {}'.format('confidence', category.confidence))
# [END def_classify_file]
# [END language_classify_gcs]


if __name__ == '__main__':
Expand Down
18 changes: 10 additions & 8 deletions in language/sentiment/sentiment_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START sentiment_tutorial]
# [START language_sentiment_tutorial]
"""Demonstrates how to make a simple call to the Natural Language API."""

# [START sentiment_tutorial_import]
# [START language_sentiment_tutorial_imports]
import argparse

from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# [END sentiment_tutorial_import]
# [END language_sentiment_tutorial_imports]


# [START def_print_result]
# [START language_sentiment_tutorial_print_result]
def print_result(annotations):
score = annotations.document_sentiment.score
magnitude = annotations.document_sentiment.magnitude
Expand All @@ -36,10 +36,10 @@ def print_result(annotations):
print('Overall Sentiment: score of {} with magnitude of {}'.format(
score, magnitude))
return 0
# [END def_print_result]
# [END language_sentiment_tutorial_print_result]


# [START def_analyze]
# [START language_sentiment_tutorial_analyze_sentiment]
def analyze(movie_review_filename):
"""Run a sentiment analysis request on text within a passed filename."""
client = language.LanguageServiceClient()
Expand All @@ -55,9 +55,10 @@ def analyze(movie_review_filename):

# Print the results
print_result(annotations)
# [END def_analyze]
# [END language_sentiment_tutorial_analyze_sentiment]


# [START language_sentiment_tutorial_run_application]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
Expand All @@ -68,4 +69,5 @@ def analyze(movie_review_filename):
args = parser.parse_args()

analyze(args.movie_review_filename)
# [END sentiment_tutorial]
# [END language_sentiment_tutorial_run_application]
# [END language_sentiment_tutorial]