diff --git a/vision/cloud-client/crop_hints/README.rst b/vision/cloud-client/crop_hints/README.rst
index 2f21627020c3..7c70174a67b1 100644
--- a/vision/cloud-client/crop_hints/README.rst
+++ b/vision/cloud-client/crop_hints/README.rst
@@ -3,7 +3,11 @@ Google Cloud Vision API Python Samples
 ===============================================================================
 
-This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content
+This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content.
+
+- See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
diff --git a/vision/cloud-client/crop_hints/README.rst.in b/vision/cloud-client/crop_hints/README.rst.in
index 5e9e7412b2cc..80ce77589d1a 100644
--- a/vision/cloud-client/crop_hints/README.rst.in
+++ b/vision/cloud-client/crop_hints/README.rst.in
@@ -8,7 +8,13 @@ product:
   `Google Cloud Vision API`_ allows developers to easily integrate vision
   detection features within applications, including image labeling, face and
   landmark detection, optical character recognition (OCR), and tagging of
-  explicit content
+  explicit content.
+
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+
+  .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
 
 setup:
 - auth
diff --git a/vision/cloud-client/crop_hints/crop_hints.py b/vision/cloud-client/crop_hints/crop_hints.py
index 1f656c9776c7..3120fe2fbe4b 100644
--- a/vision/cloud-client/crop_hints/crop_hints.py
+++ b/vision/cloud-client/crop_hints/crop_hints.py
@@ -26,6 +26,7 @@
 import io
 
 from google.cloud import vision
+from google.cloud.vision import types
 from PIL import Image, ImageDraw
 # [END imports]
 
@@ -33,17 +34,25 @@
 def get_crop_hint(path):
     # [START get_crop_hint]
     """Detect crop hints on a single image and return the first result."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    # Return bounds for the first crop hint using an aspect ratio of 1.77.
-    return image.detect_crop_hints({1.77})[0].bounds.vertices
+    crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
+    image_context = types.ImageContext(crop_hints_params=crop_hints_params)
+
+    response = client.crop_hints(image=image, image_context=image_context)
+    hints = response.crop_hints_annotation.crop_hints
+
+    # Get bounds for the first crop hint using an aspect ratio of 1.77.
+    vertices = hints[0].bounding_poly.vertices
     # [END get_crop_hint]
+
+    return vertices
+
 
 def draw_hint(image_file):
     """Draw a border around the image using the hints in the vector list."""
@@ -53,10 +62,10 @@ def draw_hint(image_file):
     im = Image.open(image_file)
     draw = ImageDraw.Draw(im)
     draw.polygon([
-        vects[0].x_coordinate, vects[0].y_coordinate,
-        vects[1].x_coordinate, vects[1].y_coordinate,
-        vects[2].x_coordinate, vects[2].y_coordinate,
-        vects[3].x_coordinate, vects[3].y_coordinate], None, 'red')
+        vects[0].x, vects[0].y,
+        vects[1].x, vects[1].y,
+        vects[2].x, vects[2].y,
+        vects[3].x, vects[3].y], None, 'red')
     im.save('output-hint.jpg', 'JPEG')
 # [END draw_hint]
 
@@ -67,8 +76,8 @@ def crop_to_hint(image_file):
     vects = get_crop_hint(image_file)
 
     im = Image.open(image_file)
-    im2 = im.crop([vects[0].x_coordinate, vects[0].y_coordinate,
-                   vects[2].x_coordinate - 1, vects[2].y_coordinate - 1])
+    im2 = im.crop([vects[0].x, vects[0].y,
+                   vects[2].x - 1, vects[2].y - 1])
     im2.save('output-crop.jpg', 'JPEG')
 # [END crop_to_hint]
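The crop-hints change above sets the template for the rest of this PR: the aspect ratios move out of a bare set literal (`image.detect_crop_hints({1.77})`) and into a `CropHintsParams` message carried by an `ImageContext`. A minimal sketch of the new call shape, assuming `google-cloud-vision==0.25.1`, default application credentials, and a hypothetical local file name:

```python
import io

from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()

# Hypothetical input file; any local JPEG or PNG works.
with io.open('cat.jpg', 'rb') as image_file:
    image = types.Image(content=image_file.read())

# Aspect ratios now travel in CropHintsParams, wrapped in an ImageContext.
crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
image_context = types.ImageContext(crop_hints_params=crop_hints_params)

response = client.crop_hints(image=image, image_context=image_context)
for hint in response.crop_hints_annotation.crop_hints:
    print([(vertex.x, vertex.y) for vertex in hint.bounding_poly.vertices])
```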
diff --git a/vision/cloud-client/detect/README.rst b/vision/cloud-client/detect/README.rst
index bb8bf95c20ff..87e9fdffe0a6 100644
--- a/vision/cloud-client/detect/README.rst
+++ b/vision/cloud-client/detect/README.rst
@@ -3,7 +3,11 @@ Google Cloud Vision API Python Samples
 ===============================================================================
 
-This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content
+This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content.
+
+- See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
diff --git a/vision/cloud-client/detect/README.rst.in b/vision/cloud-client/detect/README.rst.in
index dd9ce852759a..5ffe212923cd 100644
--- a/vision/cloud-client/detect/README.rst.in
+++ b/vision/cloud-client/detect/README.rst.in
@@ -8,7 +8,13 @@ product:
   `Google Cloud Vision API`_ allows developers to easily integrate vision
   detection features within applications, including image labeling, face and
   landmark detection, optical character recognition (OCR), and tagging of
-  explicit content
+  explicit content.
+
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+
+  .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
 
 setup:
 - auth
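Every sample in this PR migrates along the same lines, and the detect.py diff that follows applies the pattern to each feature in turn: the chainable `vision.Client().image(...).detect_*()` surface becomes an `ImageAnnotatorClient` plus an explicit `types.Image`, and each per-feature method returns a response whose named field carries the annotations. A side-by-side sketch (hypothetical file name; assumes `google-cloud-vision==0.25.1`):

```python
import io

from google.cloud import vision
from google.cloud.vision import types

# Old surface (<= 0.24):
#     vision_client = vision.Client()
#     image = vision_client.image(content=content)
#     labels = image.detect_labels()

# New surface (0.25.1): detection hangs off the client, and the
# annotations live on a named field of the returned response.
client = vision.ImageAnnotatorClient()
with io.open('cat.jpg', 'rb') as image_file:  # hypothetical file
    image = types.Image(content=image_file.read())

response = client.label_detection(image=image)
labels = response.label_annotations
```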
diff --git a/vision/cloud-client/detect/detect.py b/vision/cloud-client/detect/detect.py
index 34037fb3f58f..1e89ea9845ce 100644
--- a/vision/cloud-client/detect/detect.py
+++ b/vision/cloud-client/detect/detect.py
@@ -32,259 +32,352 @@
 import io
 
 from google.cloud import vision
+from google.cloud.vision import types
 
 
+# [START def_detect_faces]
 def detect_faces(path):
     """Detects faces in an image."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_face_detection]
+    # [START migration_image_file]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
+    # [END migration_image_file]
 
-    faces = image.detect_faces()
+    response = client.face_detection(image=image)
+    faces = response.face_annotations
+
+    # Names of likelihood from google.cloud.vision.enums
+    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
+                       'LIKELY', 'VERY_LIKELY')
 
     print('Faces:')
 
     for face in faces:
-        print('anger: {}'.format(face.emotions.anger))
-        print('joy: {}'.format(face.emotions.joy))
-        print('surprise: {}'.format(face.emotions.surprise))
+        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
+        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
+        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in face.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in face.bounding_poly.vertices])
 
         print('face bounds: {}'.format(','.join(vertices)))
+    # [END migration_face_detection]
+# [END def_detect_faces]
 
 
+# [START def_detect_faces_uri]
 def detect_faces_uri(uri):
     """Detects faces in the file located in Google Cloud Storage or the web."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
-
-    faces = image.detect_faces()
+    client = vision.ImageAnnotatorClient()
+    # [START migration_image_uri]
+    image = types.Image()
+    image.source.image_uri = uri
+    # [END migration_image_uri]
+
+    response = client.face_detection(image=image)
+    faces = response.face_annotations
+
+    # Names of likelihood from google.cloud.vision.enums
+    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
+                       'LIKELY', 'VERY_LIKELY')
 
     print('Faces:')
 
     for face in faces:
-        print('anger: {}'.format(face.emotions.anger))
-        print('joy: {}'.format(face.emotions.joy))
-        print('surprise: {}'.format(face.emotions.surprise))
+        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
+        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
+        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in face.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in face.bounding_poly.vertices])
 
         print('face bounds: {}'.format(','.join(vertices)))
+# [END def_detect_faces_uri]
 
 
+# [START def_detect_labels]
 def detect_labels(path):
     """Detects labels in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_label_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    labels = image.detect_labels()
+    response = client.label_detection(image=image)
+    labels = response.label_annotations
 
     print('Labels:')
 
     for label in labels:
         print(label.description)
+    # [END migration_label_detection]
+# [END def_detect_labels]
 
 
+# [START def_detect_labels_uri]
 def detect_labels_uri(uri):
     """Detects labels in the file located in Google Cloud Storage or on the
     Web."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    labels = image.detect_labels()
+    response = client.label_detection(image=image)
+    labels = response.label_annotations
 
     print('Labels:')
 
     for label in labels:
         print(label.description)
+# [END def_detect_labels_uri]
 
 
+# [START def_detect_landmarks]
 def detect_landmarks(path):
     """Detects landmarks in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_landmark_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    landmarks = image.detect_landmarks()
+    response = client.landmark_detection(image=image)
+    landmarks = response.landmark_annotations
 
     print('Landmarks:')
 
     for landmark in landmarks:
         print(landmark.description)
+        for location in landmark.locations:
+            lat_lng = location.lat_lng
+            print('Latitude: {}'.format(lat_lng.latitude))
+            print('Longitude: {}'.format(lat_lng.longitude))
+    # [END migration_landmark_detection]
+# [END def_detect_landmarks]
 
 
+# [START def_detect_landmarks_uri]
 def detect_landmarks_uri(uri):
     """Detects landmarks in the file located in Google Cloud Storage or on the
     Web."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    landmarks = image.detect_landmarks()
+    response = client.landmark_detection(image=image)
+    landmarks = response.landmark_annotations
 
     print('Landmarks:')
 
     for landmark in landmarks:
         print(landmark.description)
+# [END def_detect_landmarks_uri]
 
 
+# [START def_detect_logos]
 def detect_logos(path):
     """Detects logos in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_logo_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    logos = image.detect_logos()
+    response = client.logo_detection(image=image)
+    logos = response.logo_annotations
 
     print('Logos:')
 
     for logo in logos:
         print(logo.description)
+    # [END migration_logo_detection]
+# [END def_detect_logos]
 
 
+# [START def_detect_logos_uri]
 def detect_logos_uri(uri):
     """Detects logos in the file located in Google Cloud Storage or on the Web.
     """
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    logos = image.detect_logos()
+    response = client.logo_detection(image=image)
+    logos = response.logo_annotations
 
     print('Logos:')
 
     for logo in logos:
         print(logo.description)
+# [END def_detect_logos_uri]
 
 
+# [START def_detect_safe_search]
 def detect_safe_search(path):
     """Detects unsafe features in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_safe_search_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
+
+    response = client.safe_search_detection(image=image)
+    safe = response.safe_search_annotation
 
-    safe = image.detect_safe_search()
+    # Names of likelihood from google.cloud.vision.enums
+    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
+                       'LIKELY', 'VERY_LIKELY')
 
     print('Safe search:')
-    print('adult: {}'.format(safe.adult))
-    print('medical: {}'.format(safe.medical))
-    print('spoofed: {}'.format(safe.spoof))
-    print('violence: {}'.format(safe.violence))
+    print('adult: {}'.format(likelihood_name[safe.adult]))
+    print('medical: {}'.format(likelihood_name[safe.medical]))
+    print('spoofed: {}'.format(likelihood_name[safe.spoof]))
+    print('violence: {}'.format(likelihood_name[safe.violence]))
+    # [END migration_safe_search_detection]
+# [END def_detect_safe_search]
+
 
+# [START def_detect_safe_search_uri]
 def detect_safe_search_uri(uri):
     """Detects unsafe features in the file located in Google Cloud Storage or
     on the Web."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
+
+    response = client.safe_search_detection(image=image)
+    safe = response.safe_search_annotation
 
-    safe = image.detect_safe_search()
-    print('adult: {}'.format(safe.adult))
-    print('medical: {}'.format(safe.medical))
-    print('spoofed: {}'.format(safe.spoof))
-    print('violence: {}'.format(safe.violence))
+    # Names of likelihood from google.cloud.vision.enums
+    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
+                       'LIKELY', 'VERY_LIKELY')
+    print('Safe search:')
+    print('adult: {}'.format(likelihood_name[safe.adult]))
+    print('medical: {}'.format(likelihood_name[safe.medical]))
+    print('spoofed: {}'.format(likelihood_name[safe.spoof]))
+    print('violence: {}'.format(likelihood_name[safe.violence]))
+# [END def_detect_safe_search_uri]
+
 
+# [START def_detect_text]
 def detect_text(path):
     """Detects text in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_text_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    texts = image.detect_text()
+    response = client.text_detection(image=image)
+    texts = response.text_annotations
 
     print('Texts:')
 
     for text in texts:
         print('\n"{}"'.format(text.description))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in text.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in text.bounding_poly.vertices])
 
         print('bounds: {}'.format(','.join(vertices)))
+    # [END migration_text_detection]
+# [END def_detect_text]
 
 
+# [START def_detect_text_uri]
 def detect_text_uri(uri):
     """Detects text in the file located in Google Cloud Storage or on the Web.
     """
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    texts = image.detect_text()
+    response = client.text_detection(image=image)
+    texts = response.text_annotations
 
     print('Texts:')
 
     for text in texts:
         print('\n"{}"'.format(text.description))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in text.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in text.bounding_poly.vertices])
 
         print('bounds: {}'.format(','.join(vertices)))
+# [END def_detect_text_uri]
 
 
+# [START def_detect_properties]
 def detect_properties(path):
     """Detects image properties in the file."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_image_properties]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    props = image.detect_properties()
+    response = client.image_properties(image=image)
+    props = response.image_properties_annotation
 
     print('Properties:')
 
-    for color in props.colors:
+    for color in props.dominant_colors.colors:
         print('fraction: {}'.format(color.pixel_fraction))
         print('\tr: {}'.format(color.color.red))
         print('\tg: {}'.format(color.color.green))
         print('\tb: {}'.format(color.color.blue))
         print('\ta: {}'.format(color.color.alpha))
+    # [END migration_image_properties]
+# [END def_detect_properties]
 
 
+# [START def_detect_properties_uri]
 def detect_properties_uri(uri):
     """Detects image properties in the file located in Google Cloud Storage or
     on the Web."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    props = image.detect_properties()
+    response = client.image_properties(image=image)
+    props = response.image_properties_annotation
 
     print('Properties:')
 
-    for color in props.colors:
+    for color in props.dominant_colors.colors:
         print('frac: {}'.format(color.pixel_fraction))
         print('\tr: {}'.format(color.color.red))
         print('\tg: {}'.format(color.color.green))
         print('\tb: {}'.format(color.color.blue))
         print('\ta: {}'.format(color.color.alpha))
+# [END def_detect_properties_uri]
 
 
+# [START def_detect_web]
 def detect_web(path):
     """Detects web annotations given an image."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_web_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    notes = image.detect_web()
+    response = client.web_detection(image=image)
+    notes = response.web_detection
 
     if notes.pages_with_matching_images:
         print('\n{} Pages with matching images retrieved')
 
         for page in notes.pages_with_matching_images:
-            print('Score : {}'.format(page.score))
             print('Url : {}'.format(page.url))
 
     if notes.full_matching_images:
@@ -292,7 +385,6 @@
             len(notes.full_matching_images)))
 
         for image in notes.full_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if notes.partial_matching_images:
@@ -300,7 +392,6 @@
             len(notes.partial_matching_images)))
 
         for image in notes.partial_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if notes.web_entities:
@@ -309,20 +400,24 @@ def detect_web(path):
         for entity in notes.web_entities:
             print('Score : {}'.format(entity.score))
             print('Description: {}'.format(entity.description))
+    # [END migration_web_detection]
+# [END def_detect_web]
 
 
+# [START def_detect_web_uri]
 def detect_web_uri(uri):
     """Detects web annotations in the file located in Google Cloud Storage."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    notes = image.detect_web()
+    response = client.web_detection(image=image)
+    notes = response.web_detection
 
     if notes.pages_with_matching_images:
         print('\n{} Pages with matching images retrieved')
 
         for page in notes.pages_with_matching_images:
-            print('Score : {}'.format(page.score))
             print('Url : {}'.format(page.url))
 
     if notes.full_matching_images:
@@ -330,7 +425,6 @@ def detect_web_uri(uri):
             len(notes.full_matching_images)))
 
         for image in notes.full_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if notes.partial_matching_images:
@@ -338,7 +432,6 @@ def detect_web_uri(uri):
             len(notes.partial_matching_images)))
 
         for image in notes.partial_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if notes.web_entities:
@@ -347,51 +440,72 @@ def detect_web_uri(uri):
         for entity in notes.web_entities:
             print('Score : {}'.format(entity.score))
             print('Description: {}'.format(entity.description))
+# [END def_detect_web_uri]
 
 
+# [START def_detect_crop_hints]
 def detect_crop_hints(path):
     """Detects crop hints in an image."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
+
+    # [START migration_crop_hints]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
+
+    crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
+    image_context = types.ImageContext(crop_hints_params=crop_hints_params)
 
-    hints = image.detect_crop_hints({1.77})
+    response = client.crop_hints(image=image, image_context=image_context)
+    hints = response.crop_hints_annotation.crop_hints
 
     for n, hint in enumerate(hints):
         print('\nCrop Hint: {}'.format(n))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in hint.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in hint.bounding_poly.vertices])
 
         print('bounds: {}'.format(','.join(vertices)))
+    # [END migration_crop_hints]
+# [END def_detect_crop_hints]
 
 
+# [START def_detect_crop_hints_uri]
 def detect_crop_hints_uri(uri):
     """Detects crop hints in the file located in Google Cloud Storage."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
+
+    crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
+    image_context = types.ImageContext(crop_hints_params=crop_hints_params)
+
+    response = client.crop_hints(image=image, image_context=image_context)
+    hints = response.crop_hints_annotation.crop_hints
 
-    hints = image.detect_crop_hints({1.77})
     for n, hint in enumerate(hints):
         print('\nCrop Hint: {}'.format(n))
 
-        vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)
-                    for bound in hint.bounds.vertices])
+        vertices = (['({},{})'.format(vertex.x, vertex.y)
+                    for vertex in hint.bounding_poly.vertices])
 
         print('bounds: {}'.format(','.join(vertices)))
+# [END def_detect_crop_hints_uri]
 
 
+# [START def_detect_document]
 def detect_document(path):
     """Detects document features in an image."""
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
+    # [START migration_document_text_detection]
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision_client.image(content=content)
+    image = types.Image(content=content)
 
-    document = image.detect_full_text()
+    response = client.document_text_detection(image=image)
+    document = response.full_text_annotation
 
     for page in document.pages:
         for block in page.blocks:
@@ -409,15 +523,20 @@ def detect_document(path):
             print('Block Content: {}'.format(block_text))
             print('Block Bounds:\n {}'.format(block.bounding_box))
+    # [END migration_document_text_detection]
+# [END def_detect_document]
 
 
+# [START def_detect_document_uri]
 def detect_document_uri(uri):
     """Detects document features in the file located in Google Cloud
     Storage."""
-    vision_client = vision.Client()
-    image = vision_client.image(source_uri=uri)
+    client = vision.ImageAnnotatorClient()
+    image = types.Image()
+    image.source.image_uri = uri
 
-    document = image.detect_full_text()
+    response = client.document_text_detection(image=image)
+    document = response.full_text_annotation
 
     for page in document.pages:
         for block in page.blocks:
@@ -435,6 +554,7 @@ def detect_document_uri(uri):
             print('Block Content: {}'.format(block_text))
             print('Block Bounds:\n {}'.format(block.bounding_box))
+# [END def_detect_document_uri]
 
 
 def run_local(args):
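The `likelihood_name` tuple that now appears in the face and safe-search samples exists because the new protos return likelihood fields as bare integers (0–5) rather than printable `Likelihood.POSSIBLE` objects; the detect_test.py assertions below drop the `Likelihood.` prefix for the same reason. The mapping in isolation:

```python
# Likelihood enum values arrive as plain ints; index them into names,
# exactly as detect.py does above.
LIKELIHOOD_NAME = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                   'LIKELY', 'VERY_LIKELY')

assert LIKELIHOOD_NAME[3] == 'POSSIBLE'  # e.g. face.anger_likelihood == 3
```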
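All of the `*_uri` variants above share one construction: instead of the old `vision_client.image(source_uri=uri)`, the new surface builds an empty `types.Image` and sets the nested `source.image_uri` field, which accepts both `gs://` and plain HTTP(S) URLs. A sketch with a hypothetical GCS object (assumes `google-cloud-vision==0.25.1`):

```python
from google.cloud import vision
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()

# The nested ImageSource message replaces the old source_uri= keyword.
image = types.Image()
image.source.image_uri = 'gs://my-bucket/landmark.jpg'  # hypothetical object

response = client.landmark_detection(image=image)
for landmark in response.landmark_annotations:
    print(landmark.description)
    for location in landmark.locations:
        print(location.lat_lng.latitude, location.lat_lng.longitude)
```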
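Once the patch above is applied, the tutorial's pieces compose as before: `get_document_bounds` returns the boxes for one feature level and `draw_boxes` renders them. A usage sketch, assuming it runs next to the sample module and uses a hypothetical input image:

```python
from PIL import Image

from doctext import draw_boxes, get_document_bounds, FeatureType

# Collect block-level boxes and draw them in red (hypothetical file names).
image = Image.open('receipt.jpg')
bounds = get_document_bounds('receipt.jpg', FeatureType.BLOCK)
draw_boxes(image, bounds, 'red')
image.save('receipt-blocks.jpg', 'JPEG')
```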
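The faces.py change below keeps its PIL drawing code but reads the renamed vertex fields (`x`/`y` instead of `x_coordinate`/`y_coordinate`). The drawing step in isolation, with stand-in vertices so it runs without an API call (all names here are hypothetical):

```python
from PIL import Image, ImageDraw


class Vertex(object):
    """Stand-in for a bounding_poly vertex: bare .x and .y ints."""
    def __init__(self, x, y):
        self.x, self.y = x, y


vertices = [Vertex(10, 10), Vertex(90, 10), Vertex(90, 90), Vertex(10, 90)]

im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
box = [(vertex.x, vertex.y) for vertex in vertices]
draw.line(box + [box[0]], width=5, fill='#00ff00')
im.save('boxed.jpg', 'JPEG')  # hypothetical output path
```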
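With the migration applied, the face-detection CLI behaves as before; programmatic use goes through `main`, which reads the image once for detection and again (after `seek(0)`) for drawing. A usage sketch with hypothetical file names:

```python
from faces import main

# Detect up to four faces and write an annotated copy of the input.
main('face.jpg', 'face-highlighted.jpg', max_results=4)
```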
diff --git a/vision/cloud-client/quickstart/README.rst b/vision/cloud-client/quickstart/README.rst
index e6c5c8ff8ad4..b6e4b7484ae5 100644
--- a/vision/cloud-client/quickstart/README.rst
+++ b/vision/cloud-client/quickstart/README.rst
@@ -3,7 +3,11 @@ Google Cloud Vision API Python Samples
 ===============================================================================
 
-This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content
+This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content.
+
+- See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
@@ -83,11 +87,10 @@ To run this sample:
 
     Labels:
     cat
-    mammal
-    whiskers
+    photo caption
     small to medium sized cats
     cat like mammal
-    animal shelter
+    whiskers
diff --git a/vision/cloud-client/quickstart/README.rst.in b/vision/cloud-client/quickstart/README.rst.in
index 4a02c8634bd2..10a76f1fd700 100644
--- a/vision/cloud-client/quickstart/README.rst.in
+++ b/vision/cloud-client/quickstart/README.rst.in
@@ -8,7 +8,13 @@ product:
   `Google Cloud Vision API`_ allows developers to easily integrate vision
   detection features within applications, including image labeling, face and
   landmark detection, optical character recognition (OCR), and tagging of
-  explicit content
+  explicit content.
+
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+
+  .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
 
 setup:
 - auth
diff --git a/vision/cloud-client/quickstart/quickstart.py b/vision/cloud-client/quickstart/quickstart.py
index 0453587fe9ea..4eb2b303612b 100644
--- a/vision/cloud-client/quickstart/quickstart.py
+++ b/vision/cloud-client/quickstart/quickstart.py
@@ -21,10 +21,15 @@ def run_quickstart():
     import os
 
     # Imports the Google Cloud client library
+    # [START migration_import]
     from google.cloud import vision
+    from google.cloud.vision import types
+    # [END migration_import]
 
     # Instantiates a client
-    vision_client = vision.Client()
+    # [START migration_client]
+    client = vision.ImageAnnotatorClient()
+    # [END migration_client]
 
     # The name of the image file to annotate
     file_name = os.path.join(
@@ -34,11 +39,12 @@ def run_quickstart():
     # Loads the image into memory
     with io.open(file_name, 'rb') as image_file:
         content = image_file.read()
-        image = vision_client.image(
-            content=content)
+
+    image = types.Image(content=content)
 
     # Performs label detection on the image file
-    labels = image.detect_labels()
+    response = client.label_detection(image=image)
+    labels = response.label_annotations
 
     print('Labels:')
     for label in labels:
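The per-feature helpers used throughout this PR are thin wrappers over one underlying RPC. When several features are wanted for the same image, the raw batch call avoids repeated uploads; a hedged sketch, assuming the `enums` and `types` modules of `google-cloud-vision==0.25.1` expose the v1 proto names, with a hypothetical input file:

```python
import io

from google.cloud import vision
from google.cloud.vision import enums
from google.cloud.vision import types

client = vision.ImageAnnotatorClient()
with io.open('wakeupcat.jpg', 'rb') as image_file:  # hypothetical file
    image = types.Image(content=image_file.read())

# One request carrying two features; a list of requests forms the batch.
request = types.AnnotateImageRequest(
    image=image,
    features=[types.Feature(type=enums.Feature.Type.LABEL_DETECTION),
              types.Feature(type=enums.Feature.Type.TEXT_DETECTION)])

response = client.batch_annotate_images([request]).responses[0]
print([label.description for label in response.label_annotations])
```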
""" - content = face_file.read() # [START get_vision_service] - image = vision.Client().image(content=content) + client = vision.ImageAnnotatorClient() # [END get_vision_service] - return image.detect_faces() + content = face_file.read() + image = types.Image(content=content) + + return client.face_detection(image=image).face_annotations +# [END def_detect_face] +# [START def_highlight_faces] def highlight_faces(image, faces, output_filename): """Draws a polygon around the faces, then saves to output_filename. @@ -53,13 +61,15 @@ def highlight_faces(image, faces, output_filename): draw = ImageDraw.Draw(im) for face in faces: - box = [(bound.x_coordinate, bound.y_coordinate) - for bound in face.bounds.vertices] + box = [(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices] draw.line(box + [box[0]], width=5, fill='#00ff00') im.save(output_filename) +# [END def_highlight_faces] +# [START def_main] def main(input_filename, output_filename, max_results): with open(input_filename, 'rb') as image: faces = detect_face(image, max_results) @@ -70,6 +80,7 @@ def main(input_filename, output_filename, max_results): # Reset the file pointer, so we can read the file again image.seek(0) highlight_faces(image, faces, output_filename) +# [END def_main] if __name__ == '__main__': diff --git a/vision/cloud-client/quickstart/README.rst b/vision/cloud-client/quickstart/README.rst index e6c5c8ff8ad4..b6e4b7484ae5 100644 --- a/vision/cloud-client/quickstart/README.rst +++ b/vision/cloud-client/quickstart/README.rst @@ -3,7 +3,11 @@ Google Cloud Vision API Python Samples =============================================================================== -This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration @@ -83,11 +87,10 @@ To run this sample: Labels: cat - mammal - whiskers + photo caption small to medium sized cats cat like mammal - animal shelter + whiskers diff --git a/vision/cloud-client/quickstart/README.rst.in b/vision/cloud-client/quickstart/README.rst.in index 4a02c8634bd2..10a76f1fd700 100644 --- a/vision/cloud-client/quickstart/README.rst.in +++ b/vision/cloud-client/quickstart/README.rst.in @@ -8,7 +8,13 @@ product: `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of - explicit content + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. 
diff --git a/vision/cloud-client/web/README.rst.in b/vision/cloud-client/web/README.rst.in
index abcdec01dc90..0f4cf78487af 100644
--- a/vision/cloud-client/web/README.rst.in
+++ b/vision/cloud-client/web/README.rst.in
@@ -8,7 +8,13 @@ product:
   `Google Cloud Vision API`_ allows developers to easily integrate vision
   detection features within applications, including image labeling, face and
   landmark detection, optical character recognition (OCR), and tagging of
-  explicit content
+  explicit content.
+
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.25.1.
+
+
+  .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration
 
 setup:
 - auth
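The web_detect.py change below makes `annotate` return the `web_detection` message directly, for either a local path or a remote URI. Once applied, usage stays a one-liner (hypothetical argument; assumes the script runs next to the sample module):

```python
from web_detect import annotate, report

# Accepts a local path, a gs:// object, or an http(s) URL.
report(annotate('gs://my-bucket/photo.jpg'))
```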
diff --git a/vision/cloud-client/web/web_detect.py b/vision/cloud-client/web/web_detect.py
index 622aae61a9ed..0b3e72f901e0 100644
--- a/vision/cloud-client/web/web_detect.py
+++ b/vision/cloud-client/web/web_detect.py
@@ -27,36 +27,39 @@
 import io
 
 from google.cloud import vision
+from google.cloud.vision import types
 # [END imports]
 
 
 def annotate(path):
     """Returns web annotations given the path to an image."""
     # [START get_annotations]
-    image = None
-    vision_client = vision.Client()
+    client = vision.ImageAnnotatorClient()
 
     if path.startswith('http') or path.startswith('gs:'):
-        image = vision_client.image(source_uri=path)
+        image = types.Image()
+        image.source.image_uri = path
 
     else:
         with io.open(path, 'rb') as image_file:
             content = image_file.read()
 
-        image = vision_client.image(content=content)
+        image = types.Image(content=content)
 
-    return image.detect_web()
+    web_detection = client.web_detection(image=image).web_detection
     # [END get_annotations]
 
+    return web_detection
+
 
 def report(annotations):
     """Prints detected features in the provided web annotations."""
     # [START print_annotations]
     if annotations.pages_with_matching_images:
-        print('\n{} Pages with matching images retrieved')
+        print('\n{} Pages with matching images retrieved'.format(
+            len(annotations.pages_with_matching_images)))
 
         for page in annotations.pages_with_matching_images:
-            print('Score : {}'.format(page.score))
             print('Url : {}'.format(page.url))
 
     if annotations.full_matching_images:
@@ -64,7 +67,6 @@ def report(annotations):
             len(annotations.full_matching_images)))
 
         for image in annotations.full_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if annotations.partial_matching_images:
@@ -72,7 +74,6 @@ def report(annotations):
             len(annotations.partial_matching_images)))
 
         for image in annotations.partial_matching_images:
-            print('Score: {}'.format(image.score))
             print('Url : {}'.format(image.url))
 
     if annotations.web_entities: