diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index 9392c0d00c0..8e87f716511 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -122,6 +122,9 @@
  allowing QNode measurement statistics to work on devices with more than 32 qubits.
  [(#1088)](https://github.com/PennyLaneAI/pennylane/pull/1088)
+* Due to the addition of `density_matrix()` as a return type from a QNode, tuples are now supported by the `output_dim` parameter in `qnn.KerasLayer`.
+  [(#1070)](https://github.com/PennyLaneAI/pennylane/pull/1070)
+

Breaking changes

* If creating a QNode from a quantum function with an argument named `shots`,
@@ -172,7 +175,7 @@
This release contains contributions from (in alphabetical order):

-Thomas Bromley, Kyle Godbey, Josh Izaac, Daniel Polatajko, Chase Roberts, Maria Schuld.
+Thomas Bromley, Kyle Godbey, Josh Izaac, Daniel Polatajko, Chase Roberts, Sankalp Sanand, Maria Schuld.

diff --git a/pennylane/qnn/keras.py b/pennylane/qnn/keras.py
index c14aaadc811..9f3e409fa6d 100644
--- a/pennylane/qnn/keras.py
+++ b/pennylane/qnn/keras.py
@@ -185,6 +185,15 @@ def qnode(inputs, weights):
        Epoch 8/8
        100/100 [==============================] - 9s 87ms/sample - loss: 0.1474

+    **Returning a state**
+
+    If your QNode returns the state of the quantum circuit using :func:`~.state` or
+    :func:`~.density_matrix`, you must immediately follow your quantum Keras Layer with a layer
+    that casts to real values. For example, you could use
+    `tf.keras.layers.Lambda <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda>`__
+    with the function ``lambda x: tf.abs(x)``. This casting is required because TensorFlow's
+    Keras layers require a real input and are differentiated with respect to real parameters.
+
    .. _Layer: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
    """

@@ -216,8 +225,13 @@ def __init__(
        self._signature_validation(qnode, weight_shapes)
        self.qnode = to_tf(qnode, dtype=tf.keras.backend.floatx())

-        # Allows output_dim to be specified as an int, e.g., 5, or as a length-1 tuple, e.g., (5,)
-        self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim
+        # Allows output_dim to be specified as an int or as a tuple, e.g., 5, (5,), (5, 2), [5, 2].
+        # Note: single-element iterables are stored as an int and longer ones as a tuple, e.g.,
+        # [5,] or (5,) are passed on as the integer 5, while [5, 2] is passed on as the tuple (5, 2).
+        if isinstance(output_dim, Iterable) and len(output_dim) > 1:
+            self.output_dim = tuple(output_dim)
+        else:
+            self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim

        self.weight_specs = weight_specs if weight_specs is not None else {}

@@ -360,7 +374,7 @@ def compute_output_shape(self, input_shape):
        Returns:
            tf.TensorShape: shape of output data
        """
-        return tf.TensorShape([input_shape[0], self.output_dim])
+        return tf.TensorShape(input_shape[0]).concatenate(self.output_dim)

    def __str__(self):
        detail = ""

diff --git a/tests/qnn/conftest.py b/tests/qnn/conftest.py
index 89a20d2842c..194952c36c0 100644
--- a/tests/qnn/conftest.py
+++ b/tests/qnn/conftest.py
@@ -16,6 +16,7 @@
"""
import pytest
import pennylane as qml
+import numpy as np


@pytest.fixture
@@ -50,3 +51,39 @@ def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
        return [qml.expval(qml.PauliZ(i)) for i in range(output_dim)]

    return circuit, weight_shapes
+
+
+@pytest.fixture
+def get_circuit_dm(n_qubits, output_dim, interface, tape_mode):
+    """Fixture for getting a sample quantum circuit with a controllable qubit number and output
+    dimension for the density matrix return type. Returns both the circuit and the shape of the weights."""
+
+    dev = qml.device("default.qubit", wires=n_qubits)
+    weight_shapes = {
+        "w1": (3, n_qubits, 3),
+        "w2": (1,),
+        "w3": 1,
+        "w4": [3],
+        "w5": (2, n_qubits, 3),
+        "w6": 3,
+        "w7": 0,
+    }
+
+    @qml.qnode(dev, interface=interface)
+    def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
+        """Sample circuit to be used for testing the density_matrix() return type.
+ """ + qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits))) + qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits))) + qml.RX(w2[0], wires=0 % n_qubits) + qml.RX(w3, wires=1 % n_qubits) + qml.Rot(*w4, wires=2 % n_qubits) + qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits))) + qml.Rot(*w6, wires=3 % n_qubits) + qml.RX(w7, wires=4 % n_qubits) + + # Using np.log2() here because output_dim is sampled from varying the number of + # qubits (say, nq) and calculated as (2 ** nq, 2 ** nq) + return qml.density_matrix(wires=[i for i in range(int(np.log2(output_dim[0])))]) + + return circuit, weight_shapes diff --git a/tests/qnn/test_keras.py b/tests/qnn/test_keras.py index dea9b63b63c..5721ff9b16e 100644 --- a/tests/qnn/test_keras.py +++ b/tests/qnn/test_keras.py @@ -25,7 +25,6 @@ pytestmark = pytest.mark.usefixtures("tape_mode") -@pytest.mark.usefixtures("get_circuit") @pytest.fixture def model(get_circuit, n_qubits, output_dim): """Fixture for creating a hybrid Keras model. The model is composed of KerasLayers sandwiched @@ -47,13 +46,52 @@ def model(get_circuit, n_qubits, output_dim): return model +@pytest.fixture +def model_dm(get_circuit_dm, n_qubits, output_dim): + c, w = get_circuit_dm + layer1 = KerasLayer(c, w, output_dim) + layer2 = KerasLayer(c, w, output_dim) + + model = tf.keras.models.Sequential( + [ + tf.keras.layers.Dense(n_qubits), + layer1, + # Adding a lambda layer to take only the real values from density matrix + tf.keras.layers.Lambda(lambda x: tf.abs(x)), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(n_qubits), + layer2, + # Adding a lambda layer to take only the real values from density matrix + tf.keras.layers.Lambda(lambda x: tf.abs(x)), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(output_dim[0] * output_dim[1]) + ] + ) + + return model + + def indices_up_to(n_max): """Returns an iterator over the number of qubits and output dimension, up to value n_max. The output dimension never exceeds the number of qubits.""" + a, b = np.tril_indices(n_max) return zip(*[a + 1, b + 1]) +def indices_up_to_dm(n_max): + """Returns an iterator over the number of qubits and output dimension, up to value n_max. + The output dimension values never exceeds 2 ** (n_max). This is to test for density_matrix + qnodes.""" + + # If the output_dim is to be used as a tuple. First element is for n_qubits and + # the second is for output_dim. For example, for n_max = 3 it will return, + # [(1, (2, 2)), (2, (2, 2)), (2, (4, 4)), (3, (2, 2)), (3, (4, 4)), (3, (8, 8))] + + a, b = np.tril_indices(n_max) + return zip(*[a + 1], zip(*[2 ** (b + 1), 2 ** (b + 1)])) + + @pytest.mark.parametrize("interface", ["tf"]) # required for the get_circuit fixture @pytest.mark.usefixtures("get_circuit") class TestKerasLayer: @@ -419,8 +457,8 @@ def c_default(w1, w2, w3, w4, w5, w6, w7, inputs=None): assert np.allclose(layer_out[0], c(x[0], *weights)) @pytest.mark.parametrize("n_qubits, output_dim", indices_up_to(2)) - @pytest.mark.parametrize("batch_size", [2,4,6]) - @pytest.mark.parametrize("middle_dim", [2,5,8]) + @pytest.mark.parametrize("batch_size", [2, 4, 6]) + @pytest.mark.parametrize("middle_dim", [2, 5, 8]) def test_call_broadcast(self, get_circuit, output_dim, middle_dim, batch_size, n_qubits): """Test if the call() method performs correctly when the inputs argument has an arbitrary shape (that can correctly be broadcast over), i.e., for input of shape (batch_size, dn, ... 
        , d0) it outputs with shape
@@ -563,3 +601,65 @@ def test_model_save_weights(self, model, n_qubits, tmpdir):
        assert np.allclose(prediction, prediction_loaded)
        for i, w in enumerate(weights):
            assert np.allclose(w, weights_loaded[i])
+
+
+@pytest.mark.parametrize("interface", ["tf"])
+@pytest.mark.usefixtures("get_circuit_dm", "model_dm")
+class TestKerasLayerIntegrationDM:
+    """Integration tests for the pennylane.qnn.keras.KerasLayer class for
+    density_matrix()-returning circuits."""
+
+    @pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(3))
+    @pytest.mark.parametrize("batch_size", [2])
+    def test_train_model_dm(self, model_dm, batch_size, n_qubits, output_dim):
+        """Test if a model can train using the KerasLayer when the QNode returns a density_matrix().
+        The model is composed of two KerasLayers sandwiched between Dense neural network layers,
+        and the dataset is simply input and output vectors of zeros."""
+
+        if not qml.tape_mode_active():
+            pytest.skip()
+
+        x = np.zeros((batch_size, n_qubits))
+        y = np.zeros((batch_size, output_dim[0] * output_dim[1]))
+
+        model_dm.compile(optimizer="sgd", loss="mse")
+
+        model_dm.fit(x, y, batch_size=batch_size, verbose=0)
+
+    @pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(2))
+    def test_model_gradients_dm(self, model_dm, output_dim, n_qubits):
+        """Test if a gradient can be calculated with respect to all of the trainable variables in
+        the model."""
+
+        if not qml.tape_mode_active():
+            pytest.skip()
+
+        x = tf.zeros((2, n_qubits))
+        y = tf.zeros((2, output_dim[0] * output_dim[1]))
+
+        with tf.GradientTape() as tape:
+            out = model_dm(x)
+            loss = tf.keras.losses.mean_squared_error(out, y)
+
+        gradients = tape.gradient(loss, model_dm.trainable_variables)
+        assert all([g.dtype == tf.keras.backend.floatx() for g in gradients])
+
+    @pytest.mark.parametrize("n_qubits, output_dim", indices_up_to_dm(2))
+    def test_model_save_weights_dm(self, model_dm, n_qubits, tmpdir):
+        """Test if the model_dm can be successfully saved and reloaded using the get_weights()
+        method."""
+
+        if not qml.tape_mode_active():
+            pytest.skip()
+
+        prediction = model_dm.predict(np.ones(n_qubits))
+        weights = model_dm.get_weights()
+        file = str(tmpdir) + "/model"
+        model_dm.save_weights(file)
+        model_dm.load_weights(file)
+        prediction_loaded = model_dm.predict(np.ones(n_qubits))
+        weights_loaded = model_dm.get_weights()
+
+        assert np.allclose(prediction, prediction_loaded)
+        for i, w in enumerate(weights):
+            assert np.allclose(w, weights_loaded[i])
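For context, a minimal usage sketch of the pattern described in the keras.py docstring and mirrored by the `model_dm` fixture above: a QNode returning `qml.density_matrix()` is wrapped in `qml.qnn.KerasLayer` with a tuple `output_dim`, and is immediately followed by a `Lambda` layer that casts the complex output to real values. This sketch is not part of the diff; it assumes a PennyLane version that includes this change, and the circuit, layer sizes, and training data are illustrative only.

```python
import numpy as np
import tensorflow as tf
import pennylane as qml

n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev, interface="tf")
def qnode(inputs, weights):
    qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits)))
    qml.templates.StronglyEntanglingLayers(weights, wires=list(range(n_qubits)))
    # Returns a (2 ** n_qubits, 2 ** n_qubits) complex density matrix
    return qml.density_matrix(wires=list(range(n_qubits)))

weight_shapes = {"weights": (3, n_qubits, 3)}

# output_dim can now be a tuple matching the shape of the density matrix
qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=(2 ** n_qubits, 2 ** n_qubits))

model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Dense(n_qubits),
        qlayer,
        # Cast the complex density matrix to real values before the next Keras layer
        tf.keras.layers.Lambda(lambda x: tf.abs(x)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(2),
    ]
)

model.compile(optimizer="sgd", loss="mse")
x = np.random.random((8, n_qubits))
y = np.zeros((8, 2))
model.fit(x, y, epochs=2, batch_size=4, verbose=0)
```

The `Flatten` layer is only needed because the following `Dense` layer expects a flat input; the `Lambda` cast is what the docstring addition requires for state- and density-matrix-returning QNodes.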