This repository has been archived by the owner on Sep 5, 2023. It is now read-only.

fix: adds underscore to "type" to NL API samples #49

Merged: 3 commits, Nov 3, 2020
samples/v1/language_classify_gcs.py (1 addition, 1 deletion)
@@ -48,7 +48,7 @@ def sample_classify_text(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

response = client.classify_text(request = {'document': document})
# Loop through classified categories returned from the API
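For context, here is a minimal end-to-end sketch of what the corrected sample amounts to, assuming google-cloud-language >= 2.0 (the proto-plus based client), where fields whose names collide with Python built-ins such as "type" are exposed with a trailing underscore, so the request dict must use "type_". The function name and the commented-out GCS URI below are illustrative placeholders, not part of this PR.

# A minimal sketch of the corrected call path, assuming google-cloud-language >= 2.0,
# where the proto-plus Document message exposes the field as "type_".
from google.cloud import language_v1

def classify_gcs_document(gcs_content_uri):
    client = language_v1.LanguageServiceClient()

    # "type_" (not "type") is the key the proto-plus Document message accepts.
    document = {
        "gcs_content_uri": gcs_content_uri,
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "language": "en",
    }

    response = client.classify_text(request={"document": document})
    for category in response.categories:
        print("Category name: {}".format(category.name))
        print("Confidence: {}".format(category.confidence))

# Hypothetical object path, for illustration only:
# classify_gcs_document("gs://your-bucket/your-object.txt")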
samples/v1/language_classify_text.py (1 addition, 1 deletion)
@@ -46,7 +46,7 @@ def sample_classify_text(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

response = client.classify_text(request = {'document': document})
# Loop through classified categories returned from the API
samples/v1/language_entities_gcs.py (4 additions, 4 deletions)
@@ -47,17 +47,17 @@ def sample_analyze_entities(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
- encoding_type = language_v1..EncodingType.UTF8
+ encoding_type = language_v1.EncodingType.UTF8

response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})
# Loop through entities returned from the API
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Loop over the metadata associated with entity. For many known entities,
@@ -73,7 +73,7 @@ def sample_analyze_entities(gcs_content_uri):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
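The same rename shows up on the response side: the Entity and EntityMention messages expose their enum field as type_ rather than type, which is why the print statements above change too. A short illustrative sketch of reading those fields, under the same >= 2.0 assumption (the function name is hypothetical):

from google.cloud import language_v1

def print_entity_types(text_content):
    client = language_v1.LanguageServiceClient()
    document = {
        "content": text_content,
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    response = client.analyze_entities(
        request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
    )
    for entity in response.entities:
        # entity.type_ (trailing underscore) carries the enum value.
        print("{}: {}".format(entity.name, language_v1.Entity.Type(entity.type_).name))
        for mention in entity.mentions:
            # mention.type_ is renamed the same way.
            print("  {} ({})".format(
                mention.text.content,
                language_v1.EntityMention.Type(mention.type_).name,
            ))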
samples/v1/language_entities_text.py (3 additions, 3 deletions)
@@ -46,7 +46,7 @@ def sample_analyze_entities(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -58,7 +58,7 @@ def sample_analyze_entities(text_content):
print(u"Representative name for the entity: {}".format(entity.name))

# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))

# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
@@ -77,7 +77,7 @@ def sample_analyze_entities(text_content):

# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
samples/v1/language_entity_sentiment_gcs.py (3 additions, 3 deletions)
@@ -47,7 +47,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -57,7 +57,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Get the aggregate sentiment expressed for this entity in the provided document.
@@ -77,7 +77,7 @@ def sample_analyze_entity_sentiment(gcs_content_uri):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
samples/v1/language_entity_sentiment_text.py (3 additions, 3 deletions)
@@ -46,7 +46,7 @@ def sample_analyze_entity_sentiment(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
@@ -56,7 +56,7 @@ def sample_analyze_entity_sentiment(text_content):
for entity in response.entities:
print(u"Representative name for the entity: {}".format(entity.name))
# Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type).name))
print(u"Entity type: {}".format(language_v1.Entity.Type(entity.type_).name))
# Get the salience score associated with the entity in the [0, 1.0] range
print(u"Salience score: {}".format(entity.salience))
# Get the aggregate sentiment expressed for this entity in the provided document.
@@ -76,7 +76,7 @@ def sample_analyze_entity_sentiment(text_content):
print(u"Mention text: {}".format(mention.text.content))
# Get the mention type, e.g. PROPER for proper noun
print(
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type).name)
u"Mention type: {}".format(language_v1.EntityMention.Type(mention.type_).name)
)

# Get the language of the text, which will be the same as
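For the entity-sentiment samples the request-side and enum changes are identical; the per-entity sentiment fields keep their names because they do not collide with built-ins. An illustrative sketch (hypothetical function name, same client-version assumption):

from google.cloud import language_v1

def print_entity_sentiment(text_content):
    client = language_v1.LanguageServiceClient()
    document = {
        "content": text_content,
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    response = client.analyze_entity_sentiment(
        request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
    )
    for entity in response.entities:
        print("Entity: {} ({})".format(
            entity.name, language_v1.Entity.Type(entity.type_).name
        ))
        # sentiment.score and sentiment.magnitude are unaffected by the rename.
        print("  score: {}, magnitude: {}".format(
            entity.sentiment.score, entity.sentiment.magnitude
        ))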
samples/v1/language_sentiment_gcs.py (1 addition, 1 deletion)
@@ -47,7 +47,7 @@ def sample_analyze_sentiment(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
samples/v1/language_sentiment_text.py (1 addition, 1 deletion)
@@ -46,7 +46,7 @@ def sample_analyze_sentiment(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
samples/v1/language_syntax_gcs.py (1 addition, 1 deletion)
@@ -47,7 +47,7 @@ def sample_analyze_syntax(gcs_content_uri):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
document = {"gcs_content_uri": gcs_content_uri, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
samples/v1/language_syntax_text.py (1 addition, 1 deletion)
@@ -46,7 +46,7 @@ def sample_analyze_syntax(text_content):
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
language = "en"
document = {"content": text_content, "type": type_, "language": language}
document = {"content": text_content, "type_": type_, "language": language}

# Available values: NONE, UTF8, UTF16, UTF32
encoding_type = language_v1.EncodingType.UTF8
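The sentiment and syntax samples only needed the request-side change, since the fields read in their loops (score, magnitude, tokens, part_of_speech.tag) are not renamed. A final illustrative sketch for analyze_syntax, again assuming the >= 2.0 proto-plus client and the usual token fields (function name is hypothetical):

from google.cloud import language_v1

def print_syntax(text_content):
    client = language_v1.LanguageServiceClient()
    document = {
        "content": text_content,
        "type_": language_v1.Document.Type.PLAIN_TEXT,
        "language": "en",
    }
    response = client.analyze_syntax(
        request={"document": document, "encoding_type": language_v1.EncodingType.UTF8}
    )
    for token in response.tokens:
        # part_of_speech.tag keeps its name; only builtin-colliding fields gain "_".
        print("{}: {}".format(
            token.text.content,
            language_v1.PartOfSpeech.Tag(token.part_of_speech.tag).name,
        ))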