From c5e616612b9cbfac0bc45def7a7f5b78a87ff86d Mon Sep 17 00:00:00 2001
From: Thomas Schultz <daspecster@gmail.com>
Date: Fri, 6 Jan 2017 15:01:48 -0500
Subject: [PATCH] Add GAPIC support for face detection.

---
 system_tests/vision.py                    |  29 ++-
 vision/google/cloud/vision/_gax.py        |   4 +-
 vision/google/cloud/vision/annotations.py |  14 +
 vision/google/cloud/vision/client.py      |   2 +-
 vision/google/cloud/vision/face.py        | 302 ++++++++++++++++------
 vision/google/cloud/vision/geometry.py    |  27 +-
 vision/google/cloud/vision/image.py       |   4 +-
 vision/unit_tests/test__gax.py            |   4 +-
 vision/unit_tests/test_annotations.py     |  17 +-
 vision/unit_tests/test_client.py          |  63 +++--
 vision/unit_tests/test_face.py            |  17 ++
 vision/unit_tests/test_geometry.py        |  30 +++
 vision/unit_tests/test_image.py           |  10 +-
 13 files changed, 388 insertions(+), 135 deletions(-)
 create mode 100644 vision/unit_tests/test_geometry.py

diff --git a/system_tests/vision.py b/system_tests/vision.py
index d9c25fa4a2f5..3d0c029370f5 100644
--- a/system_tests/vision.py
+++ b/system_tests/vision.py
@@ -62,8 +62,8 @@ class BaseVisionTestCase(unittest.TestCase):
     def _assert_coordinate(self, coordinate):
         if coordinate is None:
             return
+        self.assertIsNotNone(coordinate)
         self.assertIsInstance(coordinate, (int, float))
-        self.assertNotEqual(coordinate, 0.0)
 
     def _assert_likelihood(self, likelihood):
         from google.cloud.vision.likelihood import Likelihood
@@ -73,8 +73,8 @@ def _assert_likelihood(self, likelihood):
                   Likelihood.VERY_UNLIKELY]
         self.assertIn(likelihood, levels)
 
-    def _maybe_http_skip(self, message):
-        if not Config.CLIENT._use_gax:
+    def _pb_not_implemented_skip(self, message):
+        if Config.CLIENT._use_gax:
             self.skipTest(message)
 
 
@@ -150,7 +150,7 @@ def _assert_landmarks(self, landmarks):
 
         for landmark in LandmarkTypes:
             if landmark is not LandmarkTypes.UNKNOWN_LANDMARK:
-                feature = getattr(landmarks, landmark.value.lower())
+                feature = getattr(landmarks, landmark.name.lower())
                 self.assertIsInstance(feature, Landmark)
                 self.assertIsInstance(feature.position, Position)
                 self._assert_coordinate(feature.position.x_coordinate)
@@ -194,7 +194,6 @@ def _assert_face(self, face):
 
     def test_detect_faces_content(self):
         client = Config.CLIENT
-        self._maybe_http_skip('gRPC is required for face detection.')
         with open(FACE_FILE, 'rb') as image_file:
             image = client.image(content=image_file.read())
         faces = image.detect_faces()
@@ -203,7 +202,6 @@ def test_detect_faces_content(self):
             self._assert_face(face)
 
     def test_detect_faces_gcs(self):
-        self._maybe_http_skip('gRPC is required for face detection.')
         bucket_name = Config.TEST_BUCKET.name
         blob_name = 'faces.jpg'
         blob = Config.TEST_BUCKET.blob(blob_name)
@@ -220,7 +218,6 @@ def test_detect_faces_gcs(self):
             self._assert_face(face)
 
     def test_detect_faces_filename(self):
-        self._maybe_http_skip('gRPC is required for face detection.')
         client = Config.CLIENT
         image = client.image(filename=FACE_FILE)
         faces = image.detect_faces()
@@ -367,7 +364,8 @@ def _assert_safe_search(self, safe_search):
         self._assert_likelihood(safe_search.violence)
 
     def test_detect_safe_search_content(self):
-        self._maybe_http_skip('gRPC is required for safe search detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for safe search detection.')
         client = Config.CLIENT
         with open(FACE_FILE, 'rb') as image_file:
             image = client.image(content=image_file.read())
@@ -377,7 +375,8 @@ def test_detect_safe_search_content(self):
         self._assert_safe_search(safe_search)
 
     def test_detect_safe_search_gcs(self):
-        self._maybe_http_skip('gRPC is required for safe search detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for safe search detection.')
         bucket_name = Config.TEST_BUCKET.name
         blob_name = 'faces.jpg'
         blob = Config.TEST_BUCKET.blob(blob_name)
@@ -395,7 +394,8 @@ def test_detect_safe_search_gcs(self):
         self._assert_safe_search(safe_search)
 
     def test_detect_safe_search_filename(self):
-        self._maybe_http_skip('gRPC is required for safe search detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for safe search detection.')
         client = Config.CLIENT
         image = client.image(filename=FACE_FILE)
         safe_searches = image.detect_safe_search()
@@ -493,7 +493,8 @@ def _assert_properties(self, image_property):
             self.assertNotEqual(color_info.score, 0.0)
 
     def test_detect_properties_content(self):
-        self._maybe_http_skip('gRPC is required for text detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for image properties detection.')
         client = Config.CLIENT
         with open(FACE_FILE, 'rb') as image_file:
             image = client.image(content=image_file.read())
@@ -503,7 +504,8 @@ def test_detect_properties_content(self):
         self._assert_properties(image_property)
 
     def test_detect_properties_gcs(self):
-        self._maybe_http_skip('gRPC is required for text detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for image properties detection.')
         client = Config.CLIENT
         bucket_name = Config.TEST_BUCKET.name
         blob_name = 'faces.jpg'
@@ -521,7 +523,8 @@ def test_detect_properties_gcs(self):
         self._assert_properties(image_property)
 
     def test_detect_properties_filename(self):
-        self._maybe_http_skip('gRPC is required for text detection.')
+        self._pb_not_implemented_skip(
+            'gRPC not implemented for image properties detection.')
         client = Config.CLIENT
         image = client.image(filename=FACE_FILE)
         properties = image.detect_properties()
diff --git a/vision/google/cloud/vision/_gax.py b/vision/google/cloud/vision/_gax.py
index 886d33eb108b..755840e106ac 100644
--- a/vision/google/cloud/vision/_gax.py
+++ b/vision/google/cloud/vision/_gax.py
@@ -17,8 +17,6 @@
 from google.cloud.gapic.vision.v1 import image_annotator_client
 from google.cloud.grpc.vision.v1 import image_annotator_pb2
 
-from google.cloud._helpers import _to_bytes
-
 from google.cloud.vision.annotations import Annotations
 
 
@@ -85,7 +83,7 @@ def _to_gapic_image(image):
               :class:`~google.cloud.vision.image.Image`.
     """
     if image.content is not None:
-        return image_annotator_pb2.Image(content=_to_bytes(image.content))
+        return image_annotator_pb2.Image(content=image.content)
     if image.source is not None:
         return image_annotator_pb2.Image(
             source=image_annotator_pb2.ImageSource(
diff --git a/vision/google/cloud/vision/annotations.py b/vision/google/cloud/vision/annotations.py
index 7550bcd8c676..bb26d413751a 100644
--- a/vision/google/cloud/vision/annotations.py
+++ b/vision/google/cloud/vision/annotations.py
@@ -119,6 +119,7 @@ def _process_image_annotations(image):
     :returns: Dictionary populated with entities from response.
     """
     return {
+        'faces': _make_faces_from_pb(image.face_annotations),
         'labels': _make_entity_from_pb(image.label_annotations),
         'landmarks': _make_entity_from_pb(image.landmark_annotations),
         'logos': _make_entity_from_pb(image.logo_annotations),
@@ -139,6 +140,19 @@ def _make_entity_from_pb(annotations):
     return [EntityAnnotation.from_pb(annotation) for annotation in annotations]
 
 
+def _make_faces_from_pb(faces):
+    """Create face objects from a gRPC response.
+
+    :type faces:
+    :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.FaceAnnotation`
+    :param faces: Protobuf instance of ``FaceAnnotation``.
+
+    :rtype: list
+    :returns: List of ``Face``.
+    """
+    return [Face.from_pb(face) for face in faces]
+
+
 def _entity_from_response_type(feature_type, results):
     """Convert a JSON result to an entity type based on the feature.
 
diff --git a/vision/google/cloud/vision/client.py b/vision/google/cloud/vision/client.py
index f41eedb345b7..89d9829d16fa 100644
--- a/vision/google/cloud/vision/client.py
+++ b/vision/google/cloud/vision/client.py
@@ -58,7 +58,7 @@ class Client(JSONClient):
     _vision_api_internal = None
 
     def __init__(self, project=None, credentials=None, http=None,
-                 use_gax=False):
+                 use_gax=None):
         super(Client, self).__init__(
             project=project, credentials=credentials, http=http)
         self._connection = Connection(
diff --git a/vision/google/cloud/vision/face.py b/vision/google/cloud/vision/face.py
index 75ed4573cb81..0809ba16b7dd 100644
--- a/vision/google/cloud/vision/face.py
+++ b/vision/google/cloud/vision/face.py
@@ -17,11 +17,26 @@
 
 from enum import Enum
 
+from google.cloud.grpc.vision.v1 import image_annotator_pb2
+
 from google.cloud.vision.geometry import BoundsBase
 from google.cloud.vision.likelihood import Likelihood
 from google.cloud.vision.geometry import Position
 
 
+def _get_pb_likelihood(likelihood):
+    """Convert protobuf Likelihood integer value to Likelihood instance.
+
+    :type likelihood: int
+    :param likelihood: Protobuf integer representing ``Likelihood``.
+
+    :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
+    :returns: Instance of ``Likelihood`` converted from protobuf value.
+    """
+    likelihood_pb = image_annotator_pb2.Likelihood.Name(likelihood)
+    return Likelihood[likelihood_pb]
+
+
 class Angles(object):
     """Angles representing the positions of a face."""
     def __init__(self, roll, pan, tilt):
@@ -30,15 +45,35 @@ def __init__(self, roll, pan, tilt):
         self._tilt = tilt
 
     @classmethod
-    def from_api_repr(cls, response):
+    def from_api_repr(cls, angle):
         """Factory: construct the angles from an Vision API response.
 
+        :type angle: dict
+        :param angle: Dictionary representation of an angle.
+
         :rtype: :class:`~google.cloud.vision.face.Angles`
         :returns: An `Angles` instance with data parsed from `response`.
         """
-        roll = response['rollAngle']
-        pan = response['panAngle']
-        tilt = response['tiltAngle']
+        roll = angle['rollAngle']
+        pan = angle['panAngle']
+        tilt = angle['tiltAngle']
+
+        return cls(roll, pan, tilt)
+
+    @classmethod
+    def from_pb(cls, angle):
+        """Factory: convert protobuf Angle object to local Angle object.
+
+        :type angle: :class:`~google.cloud.grpc.vision.v1.\
+                     image_annotator_pb2.FaceAnnotation`
+        :param angle: Protobuf ``FaceAnnotation`` response with angle data.
+
+        :rtype: :class:`~google.cloud.vision.face.Angles`
+        :returns: Instance of ``Angles``.
+        """
+        roll = angle.roll_angle
+        pan = angle.pan_angle
+        tilt = angle.tilt_angle
 
         return cls(roll, pan, tilt)
 
@@ -84,19 +119,38 @@ def __init__(self, joy_likelihood, sorrow_likelihood,
         self._anger_likelihood = anger_likelihood
 
     @classmethod
-    def from_api_repr(cls, response):
-        """Factory: construct `Emotions` from Vision API response.
+    def from_api_repr(cls, emotions):
+        """Factory: construct ``Emotions`` from Vision API response.
 
-        :type response: dict
-        :param response: Response dictionary representing a face.
+        :type emotions: dict
+        :param emotions: Response dictionary representing a face.
 
         :rtype: :class:`~google.cloud.vision.face.Emotions`
-        :returns: Populated instance of `Emotions`.
+        :returns: Populated instance of ``Emotions``.
         """
-        joy_likelihood = Likelihood[response['joyLikelihood']]
-        sorrow_likelihood = Likelihood[response['sorrowLikelihood']]
-        surprise_likelihood = Likelihood[response['surpriseLikelihood']]
-        anger_likelihood = Likelihood[response['angerLikelihood']]
+        joy_likelihood = Likelihood[emotions['joyLikelihood']]
+        sorrow_likelihood = Likelihood[emotions['sorrowLikelihood']]
+        surprise_likelihood = Likelihood[emotions['surpriseLikelihood']]
+        anger_likelihood = Likelihood[emotions['angerLikelihood']]
+
+        return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood,
+                   anger_likelihood)
+
+    @classmethod
+    def from_pb(cls, emotions):
+        """Factory: construct ``Emotions`` from Vision API response.
+
+        :type emotions: :class:`~google.cloud.grpc.vision.v1.\
+                        image_annotator_pb2.FaceAnnotation`
+        :param emotions: Protobuf response representing a face with emotions.
+
+        :rtype: :class:`~google.cloud.vision.face.Emotions`
+        :returns: Populated instance of ``Emotions``.
+        """
+        joy_likelihood = _get_pb_likelihood(emotions.joy_likelihood)
+        sorrow_likelihood = _get_pb_likelihood(emotions.sorrow_likelihood)
+        surprise_likelihood = _get_pb_likelihood(emotions.surprise_likelihood)
+        anger_likelihood = _get_pb_likelihood(emotions.anger_likelihood)
 
         return cls(joy_likelihood, sorrow_likelihood, surprise_likelihood,
                    anger_likelihood)
@@ -159,28 +213,52 @@ def __init__(self, angles, bounds, detection_confidence, emotions,
         self._image_properties = image_properties
 
     @classmethod
-    def from_api_repr(cls, response):
+    def from_api_repr(cls, face):
         """Factory: construct an instance of a Face from an API response
 
-        :type response: dict
-        :param response: Face annotation dict returned from the Vision API.
+        :type face: dict
+        :param face: Face annotation dict returned from the Vision API.
 
         :rtype: :class:`~google.cloud.vision.face.Face`
         :returns: A instance of `Face` with data parsed from `response`.
         """
-        angles = Angles.from_api_repr(response)
-        bounds = Bounds.from_api_repr(response['boundingPoly'])
-        detection_confidence = response['detectionConfidence']
-        emotions = Emotions.from_api_repr(response)
-        fd_bounds = FDBounds.from_api_repr(response['fdBoundingPoly'])
-        headwear_likelihood = Likelihood[response['headwearLikelihood']]
-        image_properties = FaceImageProperties.from_api_repr(response)
-        landmarks = Landmarks(response['landmarks'])
-        landmarking_confidence = response['landmarkingConfidence']
+        face_data = {
+            'angles': Angles.from_api_repr(face),
+            'bounds': Bounds.from_api_repr(face['boundingPoly']),
+            'detection_confidence': face['detectionConfidence'],
+            'emotions': Emotions.from_api_repr(face),
+            'fd_bounds': FDBounds.from_api_repr(face['fdBoundingPoly']),
+            'headwear_likelihood': Likelihood[face['headwearLikelihood']],
+            'image_properties': FaceImageProperties.from_api_repr(face),
+            'landmarks': Landmarks.from_api_repr(face['landmarks']),
+            'landmarking_confidence': face['landmarkingConfidence'],
+        }
+        return cls(**face_data)
+
+    @classmethod
+    def from_pb(cls, face):
+        """Factory: construct an instance of a Face from a protobuf response
+
+        :type face: :class:`~google.cloud.grpc.vision.v1.\
+                       image_annotator_pb2.AnnotateImageResponse`
+        :param face: ``AnnotateImageResponse`` from gRPC call.
 
-        return cls(angles, bounds, detection_confidence, emotions, fd_bounds,
-                   headwear_likelihood, image_properties, landmarks,
-                   landmarking_confidence)
+        :rtype: :class:`~google.cloud.vision.face.Face`
+        :returns: A instance of `Face` with data parsed from ``response``.
+        """
+        face_data = {
+            'angles': Angles.from_pb(face),
+            'bounds': Bounds.from_pb(face.bounding_poly),
+            'detection_confidence': face.detection_confidence,
+            'emotions': Emotions.from_pb(face),
+            'fd_bounds': FDBounds.from_pb(face.fd_bounding_poly),
+            'headwear_likelihood': _get_pb_likelihood(
+                face.headwear_likelihood),
+            'image_properties': FaceImageProperties.from_pb(face),
+            'landmarks': Landmarks.from_pb(face.landmarks),
+            'landmarking_confidence': face.landmarking_confidence,
+        }
+        return cls(**face_data)
 
     @property
     def anger(self):
@@ -315,14 +393,33 @@ def __init__(self, blurred_likelihood, underexposed_likelihood):
         self._underexposed_likelihood = underexposed_likelihood
 
     @classmethod
-    def from_api_repr(cls, response):
+    def from_api_repr(cls, face):
         """Factory: construct image properties from image.
 
+        :type face: dict
+        :param face: Dictionary representation of a ``Face``.
+
         :rtype: :class:`~google.cloud.vision.face.FaceImageProperties`
         :returns: Instance populated with image property data.
         """
-        blurred = Likelihood[response['blurredLikelihood']]
-        underexposed = Likelihood[response['underExposedLikelihood']]
+        blurred = Likelihood[face['blurredLikelihood']]
+        underexposed = Likelihood[face['underExposedLikelihood']]
+
+        return cls(blurred, underexposed)
+
+    @classmethod
+    def from_pb(cls, face):
+        """Factory: construct image properties from image.
+
+        :type face: :class:`~google.cloud.grpc.vision.v1.image_annotator_pb2.\
+                    FaceAnnotation`
+        :param face: Protobuf instance of `Face`.
+
+        :rtype: :class:`~google.cloud.vision.face.FaceImageProperties`
+        :returns: Instance populated with image property data.
+        """
+        blurred = _get_pb_likelihood(face.blurred_likelihood)
+        underexposed = _get_pb_likelihood(face.under_exposed_likelihood)
 
         return cls(blurred, underexposed)
 
@@ -353,41 +450,41 @@ class LandmarkTypes(Enum):
     See:
     https://cloud.google.com/vision/reference/rest/v1/images/annotate#Type_1
     """
-    UNKNOWN_LANDMARK = 'UNKNOWN_LANDMARK'
-    LEFT_EYE = 'LEFT_EYE'
-    RIGHT_EYE = 'RIGHT_EYE'
-    LEFT_OF_LEFT_EYEBROW = 'LEFT_OF_LEFT_EYEBROW'
-    RIGHT_OF_LEFT_EYEBROW = 'RIGHT_OF_LEFT_EYEBROW'
-    LEFT_OF_RIGHT_EYEBROW = 'LEFT_OF_RIGHT_EYEBROW'
-    RIGHT_OF_RIGHT_EYEBROW = 'RIGHT_OF_RIGHT_EYEBROW'
-    MIDPOINT_BETWEEN_EYES = 'MIDPOINT_BETWEEN_EYES'
-    NOSE_TIP = 'NOSE_TIP'
-    UPPER_LIP = 'UPPER_LIP'
-    LOWER_LIP = 'LOWER_LIP'
-    MOUTH_LEFT = 'MOUTH_LEFT'
-    MOUTH_RIGHT = 'MOUTH_RIGHT'
-    MOUTH_CENTER = 'MOUTH_CENTER'
-    NOSE_BOTTOM_RIGHT = 'NOSE_BOTTOM_RIGHT'
-    NOSE_BOTTOM_LEFT = 'NOSE_BOTTOM_LEFT'
-    NOSE_BOTTOM_CENTER = 'NOSE_BOTTOM_CENTER'
-    LEFT_EYE_TOP_BOUNDARY = 'LEFT_EYE_TOP_BOUNDARY'
-    LEFT_EYE_RIGHT_CORNER = 'LEFT_EYE_RIGHT_CORNER'
-    LEFT_EYE_BOTTOM_BOUNDARY = 'LEFT_EYE_BOTTOM_BOUNDARY'
-    LEFT_EYE_LEFT_CORNER = 'LEFT_EYE_LEFT_CORNER'
-    RIGHT_EYE_TOP_BOUNDARY = 'RIGHT_EYE_TOP_BOUNDARY'
-    RIGHT_EYE_RIGHT_CORNER = 'RIGHT_EYE_RIGHT_CORNER'
-    RIGHT_EYE_BOTTOM_BOUNDARY = 'RIGHT_EYE_BOTTOM_BOUNDARY'
-    RIGHT_EYE_LEFT_CORNER = 'RIGHT_EYE_LEFT_CORNER'
-    LEFT_EYEBROW_UPPER_MIDPOINT = 'LEFT_EYEBROW_UPPER_MIDPOINT'
-    RIGHT_EYEBROW_UPPER_MIDPOINT = 'RIGHT_EYEBROW_UPPER_MIDPOINT'
-    LEFT_EAR_TRAGION = 'LEFT_EAR_TRAGION'
-    RIGHT_EAR_TRAGION = 'RIGHT_EAR_TRAGION'
-    LEFT_EYE_PUPIL = 'LEFT_EYE_PUPIL'
-    RIGHT_EYE_PUPIL = 'RIGHT_EYE_PUPIL'
-    FOREHEAD_GLABELLA = 'FOREHEAD_GLABELLA'
-    CHIN_GNATHION = 'CHIN_GNATHION'
-    CHIN_LEFT_GONION = 'CHIN_LEFT_GONION'
-    CHIN_RIGHT_GONION = 'CHIN_RIGHT_GONION'
+    UNKNOWN_LANDMARK = 0
+    LEFT_EYE = 1
+    RIGHT_EYE = 2
+    LEFT_OF_LEFT_EYEBROW = 3
+    RIGHT_OF_LEFT_EYEBROW = 4
+    LEFT_OF_RIGHT_EYEBROW = 5
+    RIGHT_OF_RIGHT_EYEBROW = 6
+    MIDPOINT_BETWEEN_EYES = 7
+    NOSE_TIP = 8
+    UPPER_LIP = 9
+    LOWER_LIP = 10
+    MOUTH_LEFT = 11
+    MOUTH_RIGHT = 12
+    MOUTH_CENTER = 13
+    NOSE_BOTTOM_RIGHT = 14
+    NOSE_BOTTOM_LEFT = 15
+    NOSE_BOTTOM_CENTER = 16
+    LEFT_EYE_TOP_BOUNDARY = 17
+    LEFT_EYE_RIGHT_CORNER = 18
+    LEFT_EYE_BOTTOM_BOUNDARY = 19
+    LEFT_EYE_LEFT_CORNER = 20
+    RIGHT_EYE_TOP_BOUNDARY = 21
+    RIGHT_EYE_RIGHT_CORNER = 22
+    RIGHT_EYE_BOTTOM_BOUNDARY = 23
+    RIGHT_EYE_LEFT_CORNER = 24
+    LEFT_EYEBROW_UPPER_MIDPOINT = 25
+    RIGHT_EYEBROW_UPPER_MIDPOINT = 26
+    LEFT_EAR_TRAGION = 27
+    RIGHT_EAR_TRAGION = 28
+    LEFT_EYE_PUPIL = 29
+    RIGHT_EYE_PUPIL = 30
+    FOREHEAD_GLABELLA = 31
+    CHIN_GNATHION = 32
+    CHIN_LEFT_GONION = 33
+    CHIN_RIGHT_GONION = 34
 
 
 class FDBounds(BoundsBase):
@@ -395,23 +492,45 @@ class FDBounds(BoundsBase):
 
 
 class Landmark(object):
-    """A face-specific landmark (for example, a face feature, left eye)."""
+    """A face-specific landmark (for example, a face feature, left eye).
+
+    :type landmark_type: :class:`~google.cloud.vision.face.LandmarkTypes`
+    :param landmark_type: Instance of ``LandmarkTypes``.
+
+    :type position: :class:`~google.cloud.vision.geometry.Position`
+    :param position: Instance of ``Position`` locating the landmark.
+    """
     def __init__(self, position, landmark_type):
         self._position = position
         self._landmark_type = landmark_type
 
     @classmethod
-    def from_api_repr(cls, response_landmark):
+    def from_api_repr(cls, landmark):
+        """Factory: construct an instance of a Landmark from a response.
+
+        :type landmark: dict
+        :param landmark: Landmark representation from Vision API.
+
+        :rtype: :class:`~google.cloud.vision.face.Landmark`
+        :returns: Populated instance of ``Landmark``.
+        """
+        position = Position.from_api_repr(landmark['position'])
+        landmark_type = LandmarkTypes[landmark['type']]
+        return cls(position, landmark_type)
+
+    @classmethod
+    def from_pb(cls, landmark):
         """Factory: construct an instance of a Landmark from a response.
 
-        :type response_landmark: dict
-        :param response_landmark: Landmark representation from Vision API.
+        :type landmark: :class:`~google.cloud.grpc.vision.v1.\
+                        image_annotator_pb2.FaceAnnotation.Landmark`
+        :param landmark: Landmark representation from Vision API.
 
         :rtype: :class:`~google.cloud.vision.face.Landmark`
-        :returns: Populated instance of `Landmark`.
+        :returns: Populated instance of ``Landmark``.
         """
-        position = Position.from_api_repr(response_landmark['position'])
-        landmark_type = LandmarkTypes[response_landmark['type']]
+        position = Position.from_pb(landmark.position)
+        landmark_type = LandmarkTypes(landmark.type)
         return cls(position, landmark_type)
 
     @property
@@ -434,8 +553,37 @@ def landmark_type(self):
 
 
 class Landmarks(object):
-    """Landmarks detected on a face represented as properties."""
+    """Landmarks detected on a face represented as properties.
+
+    :type landmarks: list
+    :param landmarks: List of :class:`~google.cloud.vision.face.Landmark`.
+    """
     def __init__(self, landmarks):
-        for landmark_response in landmarks:
-            landmark = Landmark.from_api_repr(landmark_response)
-            setattr(self, landmark.landmark_type.value.lower(), landmark)
+        for landmark in landmarks:
+            setattr(self, landmark.landmark_type.name.lower(), landmark)
+
+    @classmethod
+    def from_api_repr(cls, landmarks):
+        """Factory: construct facial landmarks from Vision API response.
+
+        :type landmarks: dict
+        :param landmarks: JSON face annotation.
+
+        :rtype: :class:`~google.cloud.vision.face.Landmarks`
+        :returns: Instance of ``Landmarks`` populated with facial landmarks.
+        """
+        return cls([Landmark.from_api_repr(landmark)
+                    for landmark in landmarks])
+
+    @classmethod
+    def from_pb(cls, landmarks):
+        """Factory: construct facial landmarks from Vision gRPC response.
+
+        :type landmarks: :class:`~google.protobuf.internal.containers.\
+                         RepeatedCompositeFieldContainer`
+        :param landmarks: List of facial landmarks.
+
+        :rtype: :class:`~google.cloud.vision.face.Landmarks`
+        :returns: Instance of ``Landmarks`` populated with facial landmarks.
+        """
+        return cls([Landmark.from_pb(landmark) for landmark in landmarks])
diff --git a/vision/google/cloud/vision/geometry.py b/vision/google/cloud/vision/geometry.py
index 6b477d8b300e..39b429a32ed8 100644
--- a/vision/google/cloud/vision/geometry.py
+++ b/vision/google/cloud/vision/geometry.py
@@ -32,7 +32,7 @@ def from_api_repr(cls, vertices):
         :param vertices: List of vertices.
 
         :rtype: :class:`~google.cloud.vision.geometry.BoundsBase` or None
-        :returns: Instance of BoundsBase with populated verticies or None.
+        :returns: Instance of ``BoundsBase`` with populated vertices or None.
         """
         if vertices is None:
             return None
@@ -41,7 +41,7 @@ def from_api_repr(cls, vertices):
 
     @classmethod
     def from_pb(cls, vertices):
-        """Factory: construct BoundsBase instance from Vision gRPC response.
+        """Factory: construct BoundsBase instance from a protobuf response.
 
         :type vertices: :class:`~google.cloud.grpc.vision.v1.\
                                  geometry_pb2.BoundingPoly`
@@ -102,10 +102,10 @@ def from_api_repr(cls, location_info):
 
     @classmethod
     def from_pb(cls, location_info):
-        """Factory: construct location information from Vision gRPC response.
+        """Factory: construct location information from a protobuf response.
 
         :type location_info: :class:`~google.cloud.vision.v1.LocationInfo`
-        :param location_info: gRPC response of ``LocationInfo``.
+        :param location_info: Protobuf response with ``LocationInfo``.
 
         :rtype: :class:`~google.cloud.vision.geometry.LocationInformation`
         :returns: ``LocationInformation`` with populated latitude and
@@ -148,7 +148,8 @@ class Position(object):
     :type z_coordinate: float
     :param z_coordinate: Z position coordinate.
     """
-    def __init__(self, x_coordinate, y_coordinate, z_coordinate):
+    def __init__(self, x_coordinate=None, y_coordinate=None,
+                 z_coordinate=None):
         self._x_coordinate = x_coordinate
         self._y_coordinate = y_coordinate
         self._z_coordinate = z_coordinate
@@ -161,13 +162,25 @@ def from_api_repr(cls, position):
         :param position: Dictionary with 3 axis position data.
 
         :rtype: :class:`~google.cloud.vision.geometry.Position`
-        :returns: `Position` constructed with 3D points from API response.
+        :returns: ``Position`` constructed with 3D points from API response.
         """
         x_coordinate = position['x']
         y_coordinate = position['y']
         z_coordinate = position['z']
         return cls(x_coordinate, y_coordinate, z_coordinate)
 
+    @classmethod
+    def from_pb(cls, response_position):
+        """Factory: construct 3D position from API response.
+
+        :rtype: :class:`~google.cloud.vision.geometry.Position`
+        :returns: ``Position`` constructed with 3D points from API response.
+        """
+        x_coordinate = response_position.x
+        y_coordinate = response_position.y
+        z_coordinate = response_position.z
+        return cls(x_coordinate, y_coordinate, z_coordinate)
+
     @property
     def x_coordinate(self):
         """X position coordinate.
@@ -208,7 +221,7 @@ class Vertex(object):
     :type y_coordinate: float
     :param y_coordinate: Y position coordinate.
     """
-    def __init__(self, x_coordinate, y_coordinate):
+    def __init__(self, x_coordinate=None, y_coordinate=None):
         self._x_coordinate = x_coordinate
         self._y_coordinate = y_coordinate
 
diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py
index 52f6cfad9869..9283e83ba960 100644
--- a/vision/google/cloud/vision/image.py
+++ b/vision/google/cloud/vision/image.py
@@ -54,7 +54,7 @@ def __init__(self, client, content=None, filename=None, source_uri=None):
                 content = file_obj.read()
 
         if content is not None:
-            content = _bytes_to_unicode(b64encode(_to_bytes(content)))
+            content = _to_bytes(content)
 
         self._content = content
         self._source = source_uri
@@ -67,7 +67,7 @@ def as_dict(self):
         """
         if self.content:
             return {
-                'content': self.content
+                'content': _bytes_to_unicode(b64encode(self.content))
             }
         else:
             return {
diff --git a/vision/unit_tests/test__gax.py b/vision/unit_tests/test__gax.py
index 02ef77362f37..1e1dfbeaf7e6 100644
--- a/vision/unit_tests/test__gax.py
+++ b/vision/unit_tests/test__gax.py
@@ -131,17 +131,15 @@ def _call_fut(self, image):
         return _to_gapic_image(image)
 
     def test__to_gapic_image_content(self):
-        import base64
         from google.cloud.vision.image import Image
         from google.cloud.grpc.vision.v1 import image_annotator_pb2
 
         image_content = b'abc 1 2 3'
-        b64_content = base64.b64encode(image_content)
         client = object()
         image = Image(client, content=image_content)
         image_pb = self._call_fut(image)
         self.assertIsInstance(image_pb, image_annotator_pb2.Image)
-        self.assertEqual(image_pb.content, b64_content)
+        self.assertEqual(image_pb.content, image_content)
 
     def test__to_gapic_image_uri(self):
         from google.cloud.vision.image import Image
diff --git a/vision/unit_tests/test_annotations.py b/vision/unit_tests/test_annotations.py
index b176f8490859..4ea57988174f 100644
--- a/vision/unit_tests/test_annotations.py
+++ b/vision/unit_tests/test_annotations.py
@@ -73,7 +73,7 @@ def test_from_pb(self):
         annotations = self._make_one().from_pb(image_response)
         self.assertEqual(annotations.labels, [])
         self.assertEqual(annotations.logos, [])
-        self.assertEqual(annotations.faces, ())
+        self.assertEqual(annotations.faces, [])
         self.assertEqual(annotations.landmarks, [])
         self.assertEqual(annotations.texts, [])
         self.assertEqual(annotations.safe_searches, ())
@@ -107,6 +107,21 @@ def test_it(self):
         self.assertEqual(entity.locations[0].longitude, 2.0)
 
 
+class Test__make_faces_from_pb(unittest.TestCase):
+    def _call_fut(self, annotations):
+        from google.cloud.vision.annotations import _make_faces_from_pb
+        return _make_faces_from_pb(annotations)
+
+    def test_it(self):
+        from google.cloud.grpc.vision.v1 import image_annotator_pb2
+        from google.cloud.vision.face import Face
+
+        faces_pb = [image_annotator_pb2.FaceAnnotation()]
+
+        faces = self._call_fut(faces_pb)
+        self.assertIsInstance(faces[0], Face)
+
+
 class Test__process_image_annotations(unittest.TestCase):
     def _call_fut(self, image):
         from google.cloud.vision.annotations import _process_image_annotations
diff --git a/vision/unit_tests/test_client.py b/vision/unit_tests/test_client.py
index ccf3609a5ed6..6191d19571a0 100644
--- a/vision/unit_tests/test_client.py
+++ b/vision/unit_tests/test_client.py
@@ -97,7 +97,8 @@ def test_face_annotation(self):
             ]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         features = [Feature(feature_type=FeatureTypes.FACE_DETECTION,
@@ -123,11 +124,11 @@ def test_image_with_client_raw_content(self):
         from google.cloud.vision.image import Image
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT,
-                                credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         raw_image = client.image(content=IMAGE_CONTENT)
         self.assertIsInstance(raw_image, Image)
-        self.assertEqual(raw_image.content, B64_IMAGE_CONTENT)
+        self.assertEqual(raw_image.content, IMAGE_CONTENT)
 
     def test_image_with_client_filename(self):
         from mock import mock_open
@@ -135,14 +136,14 @@ def test_image_with_client_filename(self):
         from google.cloud.vision.image import Image
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT,
-                                credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         with patch('google.cloud.vision.image.open',
                    mock_open(read_data=IMAGE_CONTENT)) as m:
             file_image = client.image(filename='my_image.jpg')
         m.assert_called_once_with('my_image.jpg', 'rb')
         self.assertIsInstance(file_image, Image)
-        self.assertEqual(file_image.content, B64_IMAGE_CONTENT)
+        self.assertEqual(file_image.content, IMAGE_CONTENT)
 
     def test_multiple_detection_from_content(self):
         import copy
@@ -156,7 +157,8 @@ def test_multiple_detection_from_content(self):
         returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations']
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(returned)
 
         limit = 2
@@ -203,7 +205,8 @@ def test_face_detection_from_source(self):
         from unit_tests._fixtures import FACE_DETECTION_RESPONSE
         RETURNED = FACE_DETECTION_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -220,7 +223,8 @@ def test_face_detection_from_content(self):
         from unit_tests._fixtures import FACE_DETECTION_RESPONSE
         RETURNED = FACE_DETECTION_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -238,7 +242,8 @@ def test_face_detection_from_content_no_results(self):
             'responses': [{}]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -257,7 +262,8 @@ def test_label_detection_from_source(self):
             LABEL_DETECTION_RESPONSE as RETURNED)
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -278,7 +284,8 @@ def test_label_detection_no_results(self):
             'responses': [{}]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -292,7 +299,8 @@ def test_landmark_detection_from_source(self):
             LANDMARK_DETECTION_RESPONSE as RETURNED)
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -314,7 +322,8 @@ def test_landmark_detection_from_content(self):
             LANDMARK_DETECTION_RESPONSE as RETURNED)
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -331,7 +340,8 @@ def test_landmark_detection_no_results(self):
             'responses': [{}]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -344,7 +354,8 @@ def test_logo_detection_from_source(self):
         from unit_tests._fixtures import LOGO_DETECTION_RESPONSE
         RETURNED = LOGO_DETECTION_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -361,7 +372,8 @@ def test_logo_detection_from_content(self):
         from unit_tests._fixtures import LOGO_DETECTION_RESPONSE
         RETURNED = LOGO_DETECTION_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -379,7 +391,8 @@ def test_text_detection_from_source(self):
             TEXT_DETECTION_RESPONSE as RETURNED)
 
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -402,7 +415,8 @@ def test_safe_search_detection_from_source(self):
 
         RETURNED = SAFE_SEARCH_DETECTION_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -421,7 +435,8 @@ def test_safe_search_no_results(self):
             'responses': [{}]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
@@ -435,7 +450,8 @@ def test_image_properties_detection_from_source(self):
 
         RETURNED = IMAGE_PROPERTIES_RESPONSE
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(source_uri=IMAGE_SOURCE)
@@ -457,7 +473,8 @@ def test_image_properties_no_results(self):
             'responses': [{}]
         }
         credentials = _make_credentials()
-        client = self._make_one(project=PROJECT, credentials=credentials)
+        client = self._make_one(project=PROJECT, credentials=credentials,
+                                use_gax=False)
         client._connection = _Connection(RETURNED)
 
         image = client.image(content=IMAGE_CONTENT)
diff --git a/vision/unit_tests/test_face.py b/vision/unit_tests/test_face.py
index 7e4739056f08..801479bccc44 100644
--- a/vision/unit_tests/test_face.py
+++ b/vision/unit_tests/test_face.py
@@ -21,6 +21,11 @@ def _get_target_class():
         from google.cloud.vision.face import Face
         return Face
 
+    def _make_face_pb(self, *args, **kwargs):
+        from google.cloud.grpc.vision.v1 import image_annotator_pb2
+
+        return image_annotator_pb2.FaceAnnotation(*args, **kwargs)
+
     def setUp(self):
         from unit_tests._fixtures import FACE_DETECTION_RESPONSE
         self.FACE_ANNOTATIONS = FACE_DETECTION_RESPONSE['responses'][0]
@@ -28,6 +33,18 @@ def setUp(self):
         self.face = self.face_class.from_api_repr(
             self.FACE_ANNOTATIONS['faceAnnotations'][0])
 
+    def test_face_from_pb(self):
+        from google.cloud.grpc.vision.v1 import image_annotator_pb2
+        from google.cloud.grpc.vision.v1 import geometry_pb2
+
+        position_pb = geometry_pb2.Position(x=1.0, y=2.0, z=3.0)
+        landmark_pb = image_annotator_pb2.FaceAnnotation.Landmark(
+            position=position_pb, type=5)
+        face_pb = self._make_face_pb(landmarks=[landmark_pb])
+
+        face = self._get_target_class().from_pb(face_pb)
+        self.assertIsInstance(face, self._get_target_class())
+
     def test_face_landmarks(self):
         from google.cloud.vision.face import LandmarkTypes
 
diff --git a/vision/unit_tests/test_geometry.py b/vision/unit_tests/test_geometry.py
new file mode 100644
index 000000000000..07964b7988a1
--- /dev/null
+++ b/vision/unit_tests/test_geometry.py
@@ -0,0 +1,30 @@
+# Copyright 2016 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+
+class TestVertex(unittest.TestCase):
+    @staticmethod
+    def _get_target_class():
+        from google.cloud.vision.geometry import Vertex
+        return Vertex
+
+    def _make_one(self, x_coordinate, y_coordinate):
+        return self._get_target_class()(x_coordinate, y_coordinate)
+
+    def test_vertex_with_zeros(self):
+        vertex = self._make_one(0.0, 0.0)
+        self.assertEqual(vertex.x_coordinate, 0.0)
+        self.assertEqual(vertex.y_coordinate, 0.0)
diff --git a/vision/unit_tests/test_image.py b/vision/unit_tests/test_image.py
index d9a90e9845ec..f9f00d98b3fd 100644
--- a/vision/unit_tests/test_image.py
+++ b/vision/unit_tests/test_image.py
@@ -46,7 +46,7 @@ def test_must_set_one_source(self):
                            source_uri=IMAGE_SOURCE, filename='myimage.jpg')
 
         image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT)
-        self.assertEqual(image.content, B64_IMAGE_CONTENT)
+        self.assertEqual(image.content, IMAGE_CONTENT)
 
     def test_image_source_type_content(self):
         image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT)
@@ -55,8 +55,8 @@ def test_image_source_type_content(self):
             'content': B64_IMAGE_CONTENT,
         }
 
-        self.assertEqual(B64_IMAGE_CONTENT, image.content)
-        self.assertEqual(None, image.source)
+        self.assertEqual(image.content, IMAGE_CONTENT)
+        self.assertIsNone(image.source)
         self.assertEqual(image.as_dict(), as_dict)
 
     def test_image_source_type_google_cloud_storage(self):
@@ -75,7 +75,7 @@ def test_image_source_type_google_cloud_storage(self):
     def test_cannot_set_both_source_and_content(self):
         image = self._make_one(CLIENT_MOCK, content=IMAGE_CONTENT)
 
-        self.assertEqual(B64_IMAGE_CONTENT, image.content)
+        self.assertEqual(image.content, IMAGE_CONTENT)
         with self.assertRaises(AttributeError):
             image.source = IMAGE_SOURCE
 
@@ -96,5 +96,5 @@ def test_image_from_filename(self):
                    mock_open(read_data=IMAGE_CONTENT)) as m:
             image = self._make_one(CLIENT_MOCK, filename='my-image-file.jpg')
         m.assert_called_once_with('my-image-file.jpg', 'rb')
-        self.assertEqual(image.content, B64_IMAGE_CONTENT)
+        self.assertEqual(image.content, IMAGE_CONTENT)
         self.assertEqual(image.as_dict(), as_dict)