Added support for output_dim to be a tuple #1070

Merged on Mar 2, 2021 (28 commits)

Commits
67f5980
Added support for output_dim to be a tuple
Feb 6, 2021
1a2354c
Updated CHANGELOG.md
Feb 6, 2021
b271923
Merge branch 'master' into tuples-keraslayer
josh146 Feb 8, 2021
86a76cb
Merge remote-tracking branch 'upstream/master' into tuples-keraslayer
Feb 8, 2021
7fd0bb3
Merge branch 'tuples-keraslayer' of https://github.com/kessler-frost/…
Feb 8, 2021
5243509
Updated compute_output_shape and output_dim in KerasLayer
Feb 8, 2021
418aa1f
Removed mandation of output_dim to become a tuple
Feb 9, 2021
82c583f
Merged with the master branch
Feb 9, 2021
6c00cf1
Merged with main and minor fixes for black
Feb 9, 2021
b287631
Help needed for dimenstionality issues
Feb 9, 2021
491ca28
Accidentally pushed wrong file
Feb 9, 2021
b78574a
Merge branch 'master' into tuples-keraslayer
Feb 9, 2021
a1736a1
Merge branch 'master' into tuples-keraslayer
Feb 10, 2021
437fb49
Fixed the sampling of output_dim
Feb 10, 2021
fb55888
Added test_model_gradients_for_denstiy_matrix()
Feb 10, 2021
2a2aa14
Improved testing for keras integration
Feb 10, 2021
5eb1b2f
Merge branch 'master' into tuples-keraslayer
Feb 12, 2021
287138f
Merge branch 'master' into tuples-keraslayer
trbromley Feb 17, 2021
398cbe3
Applied suggestions for removing hidden logic
Feb 19, 2021
890b60d
Added testing for real values of dm in the model
Feb 19, 2021
fa0d188
Merge remote-tracking branch 'upstream/master' into tuples-keraslayer
Feb 20, 2021
243db53
Skipping dm tests for disabled tape_mode
Feb 20, 2021
8a52e50
Merge branch 'master' into tuples-keraslayer
Feb 21, 2021
d076b57
Merge branch 'master' into tuples-keraslayer
Feb 24, 2021
5082db0
New suggestions implemented
Feb 24, 2021
d3e2608
Merge branch 'master' into tuples-keraslayer
Mar 1, 2021
ca327fe
Update .github/CHANGELOG.md
trbromley Mar 2, 2021
554b887
Update pennylane/qnn/keras.py
trbromley Mar 2, 2021
5 changes: 4 additions & 1 deletion .github/CHANGELOG.md
@@ -122,6 +122,9 @@
allowing QNode measurement statistics to work on devices with more than 32 qubits.
[(#1088)](https://github.com/PennyLaneAI/pennylane/pull/1088)

* Due to the addition of `density_matrix()` as a return type from a QNode, tuples are now supported by the `output_dim` parameter in `qnn.KerasLayer`.
[(#1070)](https://github.com/PennyLaneAI/pennylane/pull/1070)

<h3>Breaking changes</h3>

* If creating a QNode from a quantum function with an argument named `shots`,
@@ -172,7 +175,7 @@

This release contains contributions from (in alphabetical order):

-Thomas Bromley, Kyle Godbey, Josh Izaac, Daniel Polatajko, Chase Roberts, Maria Schuld.
+Thomas Bromley, Kyle Godbey, Josh Izaac, Daniel Polatajko, Chase Roberts, Sankalp Sanand, Maria Schuld.



21 changes: 18 additions & 3 deletions pennylane/qnn/keras.py
@@ -186,6 +186,16 @@ def qnode(inputs, weights):
100/100 [==============================] - 9s 87ms/sample - loss: 0.1474

.. _Layer: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer

**Returning a state**

If your QNode returns the state of the quantum circuit using :func:`~.state` or
:func:`~.density_matrix`, you must immediately follow your quantum Keras Layer with a layer
that casts to reals. For example, you could use
`tf.keras.layers.Lambda <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda>`__
with the function ``lambda x: tf.abs(x)``. This casting is required because TensorFlow's
Keras layers require a real input and are differentiated with respect to real parameters.

"""

def __init__(
@@ -216,8 +226,13 @@ def __init__(
self._signature_validation(qnode, weight_shapes)
self.qnode = to_tf(qnode, dtype=tf.keras.backend.floatx())

-        # Allows output_dim to be specified as an int, e.g., 5, or as a length-1 tuple, e.g., (5,)
-        self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim
+        # Allows output_dim to be specified as an int or as a tuple, e.g., 5, (5,), (5, 2), [5, 2]
+        # Note: single-element values are treated as an int and multi-element values as a tuple,
+        # e.g., [5] or (5,) is passed on as the integer 5 and [5, 2] as the tuple (5, 2)
+        if isinstance(output_dim, Iterable) and len(output_dim) > 1:
+            self.output_dim = tuple(output_dim)
+        else:
+            self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim
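Restating the branch above in isolation (`normalize_output_dim` is a hypothetical name used only for illustration):

```python
from collections.abc import Iterable

def normalize_output_dim(output_dim):
    # Multi-element iterables are kept as a tuple
    if isinstance(output_dim, Iterable) and len(output_dim) > 1:
        return tuple(output_dim)
    # A single int, or a single-element tuple/list, is unpacked to an int
    return output_dim[0] if isinstance(output_dim, Iterable) else output_dim

assert normalize_output_dim(5) == 5            # plain int
assert normalize_output_dim((5,)) == 5         # length-1 tuple unpacked
assert normalize_output_dim([5, 2]) == (5, 2)  # list becomes a tuple
assert normalize_output_dim((4, 4)) == (4, 4)  # tuple kept as-is
```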

self.weight_specs = weight_specs if weight_specs is not None else {}

@@ -360,7 +375,7 @@ def compute_output_shape(self, input_shape):
Returns:
tf.TensorShape: shape of output data
"""
-        return tf.TensorShape([input_shape[0], self.output_dim])
+        return tf.TensorShape(input_shape[0]).concatenate(self.output_dim)
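For reference, a sketch of what the new return value evaluates to, assuming `input_shape[0]` is the batch dimension:

```python
import tensorflow as tf

# Scalar output_dim, e.g. 5, with a batch of 32:
print(tf.TensorShape(32).concatenate(5))       # (32, 5)

# Tuple output_dim, e.g. (4, 4) for a 2-qubit density matrix:
print(tf.TensorShape(32).concatenate((4, 4)))  # (32, 4, 4)
```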

def __str__(self):
detail = "<Quantum Keras Layer: func={}>"
37 changes: 37 additions & 0 deletions tests/qnn/conftest.py
@@ -16,6 +16,7 @@
"""
import pytest
import pennylane as qml
import numpy as np


@pytest.fixture
@@ -50,3 +51,39 @@ def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
return [qml.expval(qml.PauliZ(i)) for i in range(output_dim)]

return circuit, weight_shapes


@pytest.fixture
def get_circuit_dm(n_qubits, output_dim, interface, tape_mode):
"""Fixture for getting a sample quantum circuit with a controllable qubit number and output
dimension for density matrix return type. Returns both the circuit and the shape of the weights."""

dev = qml.device("default.qubit", wires=n_qubits)
weight_shapes = {
"w1": (3, n_qubits, 3),
"w2": (1,),
"w3": 1,
"w4": [3],
"w5": (2, n_qubits, 3),
"w6": 3,
"w7": 0,
}

@qml.qnode(dev, interface=interface)
def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
"""Sample circuit to be used for testing density_matrix() return type.
"""
qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits)))
qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits)))
qml.RX(w2[0], wires=0 % n_qubits)
qml.RX(w3, wires=1 % n_qubits)
qml.Rot(*w4, wires=2 % n_qubits)
qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits)))
qml.Rot(*w6, wires=3 % n_qubits)
qml.RX(w7, wires=4 % n_qubits)

# Using np.log2() here because output_dim is sampled by varying the number of
# qubits (say, nq) and is calculated as (2 ** nq, 2 ** nq); e.g., output_dim = (8, 8)
# gives the density matrix over wires [0, 1, 2]
return qml.density_matrix(wires=[i for i in range(int(np.log2(output_dim[0])))])

return circuit, weight_shapes
106 changes: 103 additions & 3 deletions tests/qnn/test_keras.py
@@ -25,7 +25,6 @@
pytestmark = pytest.mark.usefixtures("tape_mode")


-@pytest.mark.usefixtures("get_circuit")
@pytest.fixture
def model(get_circuit, n_qubits, output_dim):
"""Fixture for creating a hybrid Keras model. The model is composed of KerasLayers sandwiched
@@ -47,13 +46,52 @@ def model(get_circuit, n_qubits, output_dim):
return model


@pytest.fixture
Contributor: Does this work without @pytest.mark.usefixtures("get_circuit_dm")? (We have @pytest.mark.usefixtures("get_circuit") for model().)

Contributor Author: Yes; apparently it is mentioned in pytest's documentation that fixtures cannot use other fixtures in this way. I suggest we remove the @pytest.mark.usefixtures("get_circuit") for model() too.

kessler-frost (Contributor Author), Feb 24, 2021: AFAIK the fixture get_circuit or get_circuit_dm is used when we apply the @pytest.mark.usefixtures decorator on the TestKerasLayerIntegration and TestKerasLayerIntegrationDM classes respectively. Thus, we do not need to use it for model() and model_dm().
def model_dm(get_circuit_dm, n_qubits, output_dim):
c, w = get_circuit_dm
layer1 = KerasLayer(c, w, output_dim)
layer2 = KerasLayer(c, w, output_dim)

model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(n_qubits),
layer1,
# Adding a lambda layer to take only the real values from density matrix
tf.keras.layers.Lambda(lambda x: tf.abs(x)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(n_qubits),
layer2,
# Adding a lambda layer to take only the real values from density matrix
tf.keras.layers.Lambda(lambda x: tf.abs(x)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(output_dim[0] * output_dim[1])
]
)

return model
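The fixture behaviour discussed in the conversation above is general pytest behaviour; a minimal standalone sketch (toy fixture names, not from this PR):

```python
import pytest

@pytest.fixture
def base():
    return 2

# A fixture consumes another fixture by declaring it as a parameter;
# @pytest.mark.usefixtures has no effect when applied to a fixture function.
@pytest.fixture
def derived(base):
    return base * 2

@pytest.mark.usefixtures("base")  # valid on test classes and test functions
class TestExample:
    def test_derived(self, derived):
        assert derived == 4
```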


def indices_up_to(n_max):
"""Returns an iterator over the number of qubits and output dimension, up to value n_max.
The output dimension never exceeds the number of qubits."""

a, b = np.tril_indices(n_max)
return zip(*[a + 1, b + 1])


def indices_up_to_dm(n_max):
"""Returns an iterator over the number of qubits and output dimension, up to value n_max.
The output dimension values never exceeds 2 ** (n_max). This is to test for density_matrix
qnodes."""

# If the output_dim is to be used as a tuple. First element is for n_qubits and
# the second is for output_dim. For example, for n_max = 3 it will return,
# [(1, (2, 2)), (2, (2, 2)), (2, (4, 4)), (3, (2, 2)), (3, (4, 4)), (3, (8, 8))]

a, b = np.tril_indices(n_max)
return zip(*[a + 1], zip(*[2 ** (b + 1), 2 ** (b + 1)]))


@pytest.mark.parametrize("interface", ["tf"]) # required for the get_circuit fixture
@pytest.mark.usefixtures("get_circuit")
class TestKerasLayer:
@@ -419,8 +457,8 @@ def c_default(w1, w2, w3, w4, w5, w6, w7, inputs=None):
assert np.allclose(layer_out[0], c(x[0], *weights))

@pytest.mark.parametrize("n_qubits, output_dim", indices_up_to(2))
@pytest.mark.parametrize("batch_size", [2,4,6])
@pytest.mark.parametrize("middle_dim", [2,5,8])
@pytest.mark.parametrize("batch_size", [2, 4, 6])
@pytest.mark.parametrize("middle_dim", [2, 5, 8])
def test_call_broadcast(self, get_circuit, output_dim, middle_dim, batch_size, n_qubits):
"""Test if the call() method performs correctly when the inputs argument has an arbitrary shape (that can
correctly be broadcast over), i.e., for input of shape (batch_size, dn, ... , d0) it outputs with shape
@@ -563,3 +601,65 @@ def test_model_save_weights(self, model, n_qubits, tmpdir):
assert np.allclose(prediction, prediction_loaded)
for i, w in enumerate(weights):
assert np.allclose(w, weights_loaded[i])


@pytest.mark.parametrize("interface", ["tf"])
@pytest.mark.usefixtures("get_circuit_dm", "model_dm")
class TestKerasLayerIntegrationDM:
"""Integration tests for the pennylane.qnn.keras.KerasLayer class for
density_matrix() returning circuits."""

@pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(3))
@pytest.mark.parametrize("batch_size", [2])
def test_train_model_dm(self, model_dm, batch_size, n_qubits, output_dim):
"""Test if a model can train using the KerasLayer when QNode returns a density_matrix().
The model is composed of two KerasLayers sandwiched between Dense neural network layers,
and the dataset is simply input and output vectors of zeros."""

if not qml.tape_mode_active():
pytest.skip()

x = np.zeros((batch_size, n_qubits))
y = np.zeros((batch_size, output_dim[0] * output_dim[1]))

model_dm.compile(optimizer="sgd", loss="mse")

model_dm.fit(x, y, batch_size=batch_size, verbose=0)

@pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(2))
def test_model_gradients_dm(self, model_dm, output_dim, n_qubits):
"""Test if a gradient can be calculated with respect to all of the trainable variables in
the model."""

if not qml.tape_mode_active():
pytest.skip()

x = tf.zeros((2, n_qubits))
y = tf.zeros((2, output_dim[0] * output_dim[1]))

with tf.GradientTape() as tape:
out = model_dm(x)
loss = tf.keras.losses.mean_squared_error(out, y)

gradients = tape.gradient(loss, model_dm.trainable_variables)
assert all([g.dtype == tf.keras.backend.floatx() for g in gradients])

@pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(2))
def test_model_save_weights_dm(self, model_dm, n_qubits, tmpdir):
"""Test if the model_dm can be successfully saved and reloaded using the get_weights()
method"""

if not qml.tape_mode_active():
pytest.skip()

prediction = model_dm.predict(np.ones(n_qubits))
weights = model_dm.get_weights()
file = str(tmpdir) + "/model"
model_dm.save_weights(file)
model_dm.load_weights(file)
prediction_loaded = model_dm.predict(np.ones(n_qubits))
weights_loaded = model_dm.get_weights()

assert np.allclose(prediction, prediction_loaded)
for i, w in enumerate(weights):
assert np.allclose(w, weights_loaded[i])