Skip to content

Commit

Permalink
Updates client library to version 0.23.0 (#832)
Browse files Browse the repository at this point in the history
  • Loading branch information
gguuss authored and busunkim96 committed Sep 29, 2020
1 parent 7da32b8 commit c69e7e1
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def run_quickstart():
document = language_client.document_from_text(text)

# Detects the sentiment of the text
sentiment = document.analyze_sentiment()
sentiment = document.analyze_sentiment().sentiment

print('Text: {}'.format(text))
print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))
Expand Down
Original file line number Diff line number Diff line change
@@ -1 +1 @@
google-cloud-language==0.22.2
google-cloud-language==0.23
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def sentiment_text(text):

# Detects sentiment in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
sentiment = document.analyze_sentiment()
sentiment = document.analyze_sentiment().sentiment

print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))
Expand All @@ -50,7 +50,7 @@ def sentiment_file(gcs_uri):

# Detects sentiment in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
sentiment = document.analyze_sentiment()
sentiment = document.analyze_sentiment().sentiment

print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))
Expand All @@ -65,15 +65,16 @@ def entities_text(text):

# Detects entities in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
entities = document.analyze_entities()
entities = document.analyze_entities().entities

for entity in entities:
print('=' * 20)
print('{:<16}: {}'.format('name', entity.name))
print('{:<16}: {}'.format('type', entity.entity_type))
print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url))
print('{:<16}: {}'.format('metadata', entity.metadata))
print('{:<16}: {}'.format('salience', entity.salience))
print('{:<16}: {}'.format('wikipedia_url',
entity.metadata.get('wikipedia_url', '-')))


def entities_file(gcs_uri):
Expand All @@ -85,15 +86,16 @@ def entities_file(gcs_uri):

# Detects sentiment in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
entities = document.analyze_entities()
entities = document.analyze_entities().entities

for entity in entities:
print('=' * 20)
print('{:<16}: {}'.format('name', entity.name))
print('{:<16}: {}'.format('type', entity.entity_type))
print('{:<16}: {}'.format('wikipedia_url', entity.wikipedia_url))
print('{:<16}: {}'.format('metadata', entity.metadata))
print('{:<16}: {}'.format('salience', entity.salience))
print('{:<16}: {}'.format('wikipedia_url',
entity.metadata.get('wikipedia_url', '-')))


def syntax_text(text):
Expand All @@ -105,7 +107,7 @@ def syntax_text(text):

# Detects syntax in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
tokens = document.analyze_syntax()
tokens = document.analyze_syntax().tokens

for token in tokens:
print('{}: {}'.format(token.part_of_speech, token.text_content))
Expand All @@ -120,7 +122,7 @@ def syntax_file(gcs_uri):

# Detects syntax in the document. You can also analyze HTML with:
# document.doc_type == language.Document.HTML
tokens = document.analyze_syntax()
tokens = document.analyze_syntax().tokens

for token in tokens:
print('{}: {}'.format(token.part_of_speech, token.text_content))
Expand Down

0 comments on commit c69e7e1

Please sign in to comment.