diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md
index a9ba6d48902..032bd204294 100644
--- a/.github/CHANGELOG.md
+++ b/.github/CHANGELOG.md
@@ -1,4 +1,42 @@
-# Release 0.14.0 (current release)
+# Release 0.14.1 (current release)
+
+<h3>Bug fixes</h3>
+
+* Fixes a bug where inverse operations could not be differentiated
+  using backpropagation on `default.qubit`.
+  [(#1072)](https://github.com/PennyLaneAI/pennylane/pull/1072)
+
+* The QNode has a new keyword argument, `max_expansion`, that determines the maximum number of
+  times the internal circuit should be expanded when executed on a device. In addition, the
+  default maximum number of expansions has been increased from 2 to 10, allowing devices that
+  require more than two operator decompositions to be supported.
+  [(#1074)](https://github.com/PennyLaneAI/pennylane/pull/1074)
+
+* Fixes a bug where `Hamiltonian` objects created with non-list arguments
+  raised an error for arithmetic operations.
+  [(#1082)](https://github.com/PennyLaneAI/pennylane/pull/1082)
+
+* Fixes a bug where `Hamiltonian` objects with no coefficients or operations
+  would return a faulty result when used with `ExpvalCost`.
+  [(#1082)](https://github.com/PennyLaneAI/pennylane/pull/1082)
+
+* Fixes a testing bug where tests that required JAX would fail if JAX was not installed.
+  The tests will now instead be skipped if JAX cannot be imported.
+  [(#1066)](https://github.com/PennyLaneAI/pennylane/pull/1066)
+
+<h3>Documentation</h3>
+
+* Updates mentions of `generate_hamiltonian` to `molecular_hamiltonian` in the
+  docstrings of the `ExpvalCost` and `Hamiltonian` classes.
+  [(#1077)](https://github.com/PennyLaneAI/pennylane/pull/1077)
+
+<h3>Contributors</h3>
+
+This release contains contributions from (in alphabetical order):
+
+Thomas Bromley, Josh Izaac, Antal Száva.
+
+# Release 0.14.0
 
 <h3>New features since last release</h3>
 
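As a usage note for the `max_expansion` changelog entry above: the sketch below is not part of the patch; the device, template, and expansion depth of 5 are illustrative assumptions, only the `max_expansion` keyword itself comes from this release.

```python
import pennylane as qml

dev = qml.device("default.qubit", wires=2)

# max_expansion caps how many rounds of decomposition the QNode may apply
# before handing the circuit to the device; the default is now 10 (was 2).
@qml.qnode(dev, max_expansion=5)
def circuit(weights):
    qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
    return qml.expval(qml.PauliZ(0))

weights = qml.init.strong_ent_layers_uniform(n_layers=2, n_wires=2, seed=42)
print(circuit(weights))
```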
diff --git a/pennylane/_version.py b/pennylane/_version.py
index 836bec02c4e..f91c1cdab2b 100644
--- a/pennylane/_version.py
+++ b/pennylane/_version.py
@@ -16,4 +16,4 @@
 Version number (major.minor.patch[-label])
 """
 
-__version__ = "0.14.0"
+__version__ = "0.14.1"
diff --git a/pennylane/devices/default_qubit_autograd.py b/pennylane/devices/default_qubit_autograd.py
index d5cb1883ab9..3b11ffc88fc 100644
--- a/pennylane/devices/default_qubit_autograd.py
+++ b/pennylane/devices/default_qubit_autograd.py
@@ -157,11 +157,18 @@ def _get_unitary_matrix(self, unitary):
             the unitary in the computational basis, or, in the case of a diagonal
             unitary, a 1D array representing the matrix diagonal.
         """
-        op_name = unitary.name
+        op_name = unitary.name.split(".inv")[0]
+
         if op_name in self.parametric_ops:
             if op_name == "MultiRZ":
-                return self.parametric_ops[unitary.name](*unitary.parameters, len(unitary.wires))
-            return self.parametric_ops[unitary.name](*unitary.parameters)
+                mat = self.parametric_ops[op_name](*unitary.parameters, len(unitary.wires))
+            else:
+                mat = self.parametric_ops[op_name](*unitary.parameters)
+
+            if unitary.inverse:
+                mat = self._transpose(self._conj(mat))
+
+            return mat
 
         if isinstance(unitary, DiagonalOperation):
             return unitary.eigvals
diff --git a/pennylane/devices/default_qubit_jax.py b/pennylane/devices/default_qubit_jax.py
index 439197f50d1..23c7d3a7a43 100644
--- a/pennylane/devices/default_qubit_jax.py
+++ b/pennylane/devices/default_qubit_jax.py
@@ -204,11 +204,18 @@ def _get_unitary_matrix(self, unitary):
             the unitary in the computational basis, or, in the case of a diagonal
             unitary, a 1D array representing the matrix diagonal.
         """
-        op_name = unitary.name
+        op_name = unitary.name.split(".inv")[0]
+
         if op_name in self.parametric_ops:
             if op_name == "MultiRZ":
-                return self.parametric_ops[unitary.name](*unitary.parameters, len(unitary.wires))
-            return self.parametric_ops[unitary.name](*unitary.parameters)
+                mat = self.parametric_ops[op_name](*unitary.parameters, len(unitary.wires))
+            else:
+                mat = self.parametric_ops[op_name](*unitary.parameters)
+
+            if unitary.inverse:
+                mat = self._transpose(self._conj(mat))
+
+            return mat
 
         if isinstance(unitary, DiagonalOperation):
             return unitary.eigvals
diff --git a/pennylane/devices/default_qubit_tf.py b/pennylane/devices/default_qubit_tf.py
index 7be29983530..a5965bf5888 100644
--- a/pennylane/devices/default_qubit_tf.py
+++ b/pennylane/devices/default_qubit_tf.py
@@ -214,10 +214,18 @@ def _get_unitary_matrix(self, unitary):
             the return type will be a ``np.ndarray``. For parametric unitaries, a
             ``tf.Tensor`` object will be returned.
         """
-        if unitary.name in self.parametric_ops:
-            if unitary.name == "MultiRZ":
-                return self.parametric_ops[unitary.name](unitary.parameters, len(unitary.wires))
-            return self.parametric_ops[unitary.name](*unitary.parameters)
+        op_name = unitary.name.split(".inv")[0]
+
+        if op_name in self.parametric_ops:
+            if op_name == "MultiRZ":
+                mat = self.parametric_ops[op_name](*unitary.parameters, len(unitary.wires))
+            else:
+                mat = self.parametric_ops[op_name](*unitary.parameters)
+
+            if unitary.inverse:
+                mat = self._transpose(self._conj(mat))
+
+            return mat
 
         if isinstance(unitary, DiagonalOperation):
             return unitary.eigvals
diff --git a/pennylane/tape/qnode.py b/pennylane/tape/qnode.py
index ce200c548c2..640887aef99 100644
--- a/pennylane/tape/qnode.py
+++ b/pennylane/tape/qnode.py
@@ -107,6 +107,11 @@ class QNode:
             and is stored and re-used for further quantum evaluations.
             Only set this to False if it is known that the underlying quantum
             structure is **independent of QNode input**.
+        max_expansion (int): The number of times the internal circuit should be expanded when
+            executed on a device. Expansion occurs when an operation or measurement is not
+            supported, and results in a gate decomposition. If any operations in the decomposition
+            remain unsupported by the device, another expansion occurs.
+
     Keyword Args:
         h=1e-7 (float): step size for the finite difference method
         order=1 (int): The order of the finite difference method to use. ``1`` corresponds
@@ -125,7 +130,14 @@ class QNode:
     # pylint:disable=too-many-instance-attributes,too-many-arguments
 
     def __init__(
-        self, func, device, interface="autograd", diff_method="best", mutable=True, **diff_options
+        self,
+        func,
+        device,
+        interface="autograd",
+        diff_method="best",
+        mutable=True,
+        max_expansion=10,
+        **diff_options,
     ):
 
         if interface is not None and interface not in self.INTERFACE_MAP:
@@ -156,7 +168,7 @@ def __init__(
         self.diff_options.update(tape_diff_options)
 
         self.dtype = np.float64
-        self.max_expansion = 2
+        self.max_expansion = max_expansion
 
     # pylint: disable=too-many-return-statements
     @staticmethod
@@ -765,7 +777,9 @@ def to_jax(self):
     INTERFACE_MAP = {"autograd": to_autograd, "torch": to_torch, "tf": to_tf, "jax": to_jax}
 
 
-def qnode(device, interface="autograd", diff_method="best", mutable=True, **diff_options):
+def qnode(
+    device, interface="autograd", diff_method="best", mutable=True, max_expansion=10, **diff_options
+):
     """Decorator for creating QNodes.
 
     This decorator is used to indicate to PennyLane that the decorated function contains a
@@ -842,6 +856,11 @@ def qnode(device, interface="autograd", diff_method="best", mutable=True, **diff
            and is stored and re-used for further quantum evaluations.
            Only set this to False if it is known that the underlying quantum
            structure is **independent of QNode input**.
+        max_expansion (int): The number of times the internal circuit should be expanded when
+            executed on a device. Expansion occurs when an operation or measurement is not
+            supported, and results in a gate decomposition. If any operations in the decomposition
+            remain unsupported by the device, another expansion occurs.
+
     Keyword Args:
         h=1e-7 (float): Step size for the finite difference method.
         order=1 (int): The order of the finite difference method to use. ``1`` corresponds
@@ -865,6 +884,7 @@ def qfunc_decorator(func):
             interface=interface,
             diff_method=diff_method,
             mutable=mutable,
+            max_expansion=max_expansion,
             **diff_options,
         )
         return update_wrapper(qn, func)
diff --git a/pennylane/vqe/vqe.py b/pennylane/vqe/vqe.py
index 9f23256df14..15fde7a2eae 100644
--- a/pennylane/vqe/vqe.py
+++ b/pennylane/vqe/vqe.py
@@ -42,7 +42,7 @@ class Hamiltonian:
         simplify (bool): Specifies whether the Hamiltonian is simplified upon initialization
             (like-terms are combined). The default value is `False`.
 
-    .. seealso:: :class:`~.ExpvalCost`, :func:`~.generate_hamiltonian`
+    .. seealso:: :class:`~.ExpvalCost`, :func:`~.molecular_hamiltonian`
 
    **Example:**
 
@@ -66,7 +66,7 @@ class Hamiltonian:
    >>> print(H)
    (0.8) [Hermitian0'1]
 
-    Alternatively, the :func:`~.generate_hamiltonian` function from the
+    Alternatively, the :func:`~.molecular_hamiltonian` function from the
    :doc:`/introduction/chemistry` module can be used to generate a molecular Hamiltonian.
    """
 
@@ -90,8 +90,8 @@ def __init__(self, coeffs, observables, simplify=False):
                 "Could not create circuits. Some or all observables are not valid."
             )
 
-        self._coeffs = coeffs
-        self._ops = observables
+        self._coeffs = list(coeffs)
+        self._ops = list(observables)
 
         if simplify:
             self.simplify()
@@ -396,7 +396,7 @@ class ExpvalCost:
         callable: a cost function with signature ``cost_fn(params, **kwargs)`` that evaluates
         the expectation of the Hamiltonian on the provided device(s)
 
-    .. seealso:: :class:`~.Hamiltonian`, :func:`~.generate_hamiltonian`, :func:`~.map`, :func:`~.dot`
+    .. seealso:: :class:`~.Hamiltonian`, :func:`~.molecular_hamiltonian`, :func:`~.map`, :func:`~.dot`
 
    **Example:**
 
@@ -486,6 +486,10 @@ def __init__(
         self._multiple_devices = isinstance(device, Sequence)
         """Bool: Records if multiple devices are input"""
 
+        if all(c == 0 for c in coeffs) or not coeffs:
+            self.cost_fn = lambda *args, **kwargs: np.array(0)
+            return
+
         tape_mode = qml.tape_mode_active()
 
         self._optimize = optimize
diff --git a/tests/conftest.py b/tests/conftest.py
index b36df94e564..6753c647cfd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -140,6 +140,11 @@ def skip_if_no_tf_support(tf_support):
         pytest.skip("Skipped, no tf support")
 
 
+@pytest.fixture
+def skip_if_no_jax_support():
+    pytest.importorskip("jax")
+
+
 @pytest.fixture(scope="module", params=[1, 2, 3])
 def seed(request):
diff --git a/tests/devices/test_default_qubit_autograd.py b/tests/devices/test_default_qubit_autograd.py
index e8841213e57..7f89b274e16 100644
--- a/tests/devices/test_default_qubit_autograd.py
+++ b/tests/devices/test_default_qubit_autograd.py
@@ -395,6 +395,23 @@ def circuit(param):
         res = qml.jacobian(circuit)(param)
         assert np.allclose(res, np.zeros(wires **2))
 
+    def test_inverse_operation_jacobian_backprop(self, tol):
+        """Test that inverse operations work in backprop
+        mode"""
+        dev = qml.device('default.qubit.autograd', wires=1)
+
+        @qml.qnode(dev, diff_method="backprop")
+        def circuit(param):
+            qml.RY(param, wires=0).inv()
+            return qml.expval(qml.PauliX(0))
+
+        x = 0.3
+        res = circuit(x)
+        assert np.allclose(res, -np.sin(x), atol=tol, rtol=0)
+
+        grad = qml.grad(circuit)(x)
+        assert np.allclose(grad, -np.cos(x), atol=tol, rtol=0)
+
     def test_full_subsystem(self, mocker):
         """Test applying a state vector to the full subsystem"""
         dev = DefaultQubitAutograd(wires=['a', 'b', 'c'])
diff --git a/tests/devices/test_default_qubit_jax.py b/tests/devices/test_default_qubit_jax.py
index bbc09b3f9c7..4bb360a2a27 100644
--- a/tests/devices/test_default_qubit_jax.py
+++ b/tests/devices/test_default_qubit_jax.py
@@ -475,6 +475,24 @@ def circuit(param):
         res = jacobian_transform(circuit)(param)
         assert jnp.allclose(res, jnp.zeros(wires ** 2))
 
+    def test_inverse_operation_jacobian_backprop(self, tol):
+        """Test that inverse operations work in backprop
+        mode"""
+        dev = qml.device('default.qubit.jax', wires=1)
+
+        @qml.qnode(dev, diff_method="backprop", interface="jax")
+        def circuit(param):
+            qml.RY(param, wires=0).inv()
+            return qml.expval(qml.PauliX(0))
+
+        x = 0.3
+        res = circuit(x)
+        assert np.allclose(res, -np.sin(x), atol=tol, rtol=0)
+
+        # Adjust grad func to be compatible when tested with both old and new cores of PennyLane
+        grad = jax.grad(lambda a: circuit(a).reshape(()))(x)
+        assert np.allclose(grad, -np.cos(x), atol=tol, rtol=0)
+
     def test_full_subsystem(self, mocker):
         """Test applying a state vector to the full subsystem"""
         dev = DefaultQubitJax(wires=["a", "b", "c"])
diff --git a/tests/devices/test_default_qubit_tf.py b/tests/devices/test_default_qubit_tf.py
index 3cb86794485..619bab78da9 100644
--- a/tests/devices/test_default_qubit_tf.py
+++ b/tests/devices/test_default_qubit_tf.py
@@ -1238,6 +1238,26 @@ def cost(params):
         )
         assert np.allclose(res.numpy(), expected_grad, atol=tol, rtol=0)
 
+    def test_inverse_operation_jacobian_backprop(self, tol):
+        """Test that inverse operations work in backprop
+        mode"""
+        dev = qml.device('default.qubit.tf', wires=1)
+
+        @qml.qnode(dev, diff_method="backprop", interface="tf")
+        def circuit(param):
+            qml.RY(param, wires=0).inv()
+            return qml.expval(qml.PauliX(0))
+
+        x = tf.Variable(0.3)
+
+        with tf.GradientTape() as tape:
+            res = circuit(x)
+
+        assert np.allclose(res, -tf.sin(x), atol=tol, rtol=0)
+
+        grad = tape.gradient(res, x)
+        assert np.allclose(grad, -tf.cos(x), atol=tol, rtol=0)
+
     @pytest.mark.parametrize("interface", ["autograd", "torch"])
     def test_error_backprop_wrong_interface(self, interface, tol):
         """Tests that an error is raised if diff_method='backprop' but not using
diff --git a/tests/qnn/test_cost.py b/tests/qnn/test_cost.py
index 57113403f5d..716dc5ef96a 100644
--- a/tests/qnn/test_cost.py
+++ b/tests/qnn/test_cost.py
@@ -42,7 +42,9 @@ def skip_if_no_torch_support():
 
 
 @pytest.mark.parametrize("interface", ALLOWED_INTERFACES)
-@pytest.mark.usefixtures("skip_if_no_torch_support", "skip_if_no_tf_support")
+@pytest.mark.usefixtures(
+    "skip_if_no_torch_support", "skip_if_no_tf_support", "skip_if_no_jax_support"
+)
 class TestSquaredErrorLoss:
     def test_no_target(self, interface):
         with pytest.raises(ValueError, match="The target cannot be None"):
diff --git a/tests/test_vqe.py b/tests/test_vqe.py
index 373c46cee12..069325cd41f 100644
--- a/tests/test_vqe.py
+++ b/tests/test_vqe.py
@@ -111,6 +111,14 @@ def seed():
     ((0.5, 1.2), (qml.PauliZ(0), qml.PauliZ(1)), [0.5 * 1.0, 1.2 * 1.0]),
 ]
 
+
+
+zero_hamiltonians_with_expvals = [
+    ([], [], [0]),
+    ((0, 0), (qml.PauliZ(0), qml.PauliZ(1)), [0]),
+    ((0,0,0), (qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(1)), [0]),
+]
+
 simplify_hamiltonians = [
     (
         qml.Hamiltonian([1, 1, 1], [qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(1)]),
@@ -143,6 +151,20 @@ def seed():
             [qml.Hermitian(np.array([[1, 0], [0, -1]]), "a"), qml.PauliX("b") @ qml.PauliY(1.3)],
         ),
     ),
+
+    # Simplifies to zero Hamiltonian
+    (
+        qml.Hamiltonian([1, -0.5, -0.5], [qml.PauliX(0) @ qml.Identity(1), qml.PauliX(0), qml.PauliX(0)]),
+        qml.Hamiltonian([], []),
+    ),
+    (
+        qml.Hamiltonian([1, -1], [qml.PauliX(4) @ qml.Identity(0) @ qml.PauliX(1), qml.PauliX(4) @ qml.PauliX(1)]),
+        qml.Hamiltonian([], []),
+    ),
+    (
+        qml.Hamiltonian([0], [qml.Identity(0)]),
+        qml.Hamiltonian([0], [qml.Identity(0)]),
+    ),
 ]
 
 equal_hamiltonians = [
@@ -231,6 +253,15 @@ def seed():
        qml.PauliX("b") @ qml.Identity(5),
        qml.Hamiltonian([2, 1.2, 0.1], [qml.PauliX("b"), qml.PauliZ(3.1), qml.PauliX(1.6)]),
    ),
+
+    # Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
+    (
+        qml.Hamiltonian((1, 1.2, 0.1), (qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2))),
+        qml.Hamiltonian(np.array([0.5, 0.3, 1]), np.array([qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])),
+        qml.Hamiltonian(
+            (1.5, 1.2, 1.1, 0.3), np.array([qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)])
+        ),
+    ),
 ]
 
 sub_hamiltonians = [
@@ -276,6 +307,28 @@ def seed():
        qml.PauliX("b") @ qml.Identity(1),
        qml.Hamiltonian([1.2, 0.1], [qml.PauliZ(3.1), qml.PauliX(1.6)]),
    ),
+
+    # The result is the zero Hamiltonian
+    (
+        qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
+        qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
+        qml.Hamiltonian([], []),
+    ),
+    (
+        qml.Hamiltonian([1, 2], [qml.PauliX(4), qml.PauliZ(2)]),
+        qml.Hamiltonian([1, 2], [qml.PauliX(4), qml.PauliZ(2)]),
+        qml.Hamiltonian([], []),
+    ),
+
+
+    # Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
+    (
+        qml.Hamiltonian((1, 1.2, 0.1), (qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2))),
+        qml.Hamiltonian(np.array([0.5, 0.3, 1.6]), np.array([qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])),
+        qml.Hamiltonian(
+            (0.5, 1.2, -1.5, -0.3), np.array([qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2), qml.PauliX(1)])
+        ),
+    ),
 ]
 
 mul_hamiltonians = [
@@ -300,6 +353,25 @@ def seed():
            [qml.Hermitian(np.array([[1, 0], [0, -1]]), "b"), qml.PauliZ(23) @ qml.PauliZ(0)],
        ),
    ),
+
+    # The result is the zero Hamiltonian
+    (
+        0,
+        qml.Hamiltonian([1], [qml.PauliX(0)]),
+        qml.Hamiltonian([0], [qml.PauliX(0)]),
+    ),
+    (
+        0,
+        qml.Hamiltonian([1, 1.2, 0.1], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
+        qml.Hamiltonian([0, 0, 0], [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(2)]),
+    ),
+
+    # Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
+    (
+        3,
+        qml.Hamiltonian((1.5, 0.5), (qml.PauliX(0), qml.PauliZ(1))),
+        qml.Hamiltonian(np.array([4.5, 1.5]), np.array([qml.PauliX(0), qml.PauliZ(1)])),
+    ),
 ]
 
 matmul_hamiltonians = [
@@ -347,6 +419,21 @@ def seed():
        qml.PauliX(2),
        qml.Hamiltonian([1, 1], [qml.PauliX(0) @ qml.PauliX(2), qml.PauliZ(1) @ qml.PauliX(2)]),
    ),
+
+    # Case where arguments coeffs and ops to the Hamiltonian are iterables other than lists
+    (
+        qml.Hamiltonian((1, 1), (qml.PauliX(0), qml.PauliZ(1))),
+        qml.Hamiltonian(np.array([0.5, 0.5]), np.array([qml.PauliZ(2), qml.PauliZ(3)])),
+        qml.Hamiltonian(
+            (0.5, 0.5, 0.5, 0.5),
+            np.array([
+                qml.PauliX(0) @ qml.PauliZ(2),
+                qml.PauliX(0) @ qml.PauliZ(3),
+                qml.PauliZ(1) @ qml.PauliZ(2),
+                qml.PauliZ(1) @ qml.PauliZ(3),
+            ]),
+        ),
+    ),
 ]
 
 big_hamiltonian_coeffs = np.array(
@@ -507,7 +594,7 @@ def test_hamiltonian_valid_init(self, coeffs, ops):
         """Tests that the Hamiltonian object is created with the correct attributes"""
         H = qml.vqe.Hamiltonian(coeffs, ops)
 
-        assert H.terms == (coeffs, ops)
+        assert H.terms == (list(coeffs), list(ops))
 
     @pytest.mark.parametrize("coeffs, ops", invalid_hamiltonians)
     def test_hamiltonian_invalid_init_exception(self, coeffs, ops):
@@ -709,7 +796,7 @@ def test_cost_evaluate(self, params, ansatz, coeffs, observables):
         assert type(expval(params)) == np.float64
         assert np.shape(expval(params)) == ()  # expval should be scalar
 
-    @pytest.mark.parametrize("coeffs, observables, expected", hamiltonians_with_expvals)
+    @pytest.mark.parametrize("coeffs, observables, expected", hamiltonians_with_expvals + zero_hamiltonians_with_expvals)
     def test_cost_expvals(self, coeffs, observables, expected):
         """Tests that the cost function returns correct expectation values"""
         dev = qml.device("default.qubit", wires=2)
@@ -824,6 +911,25 @@ def test_optimize_grad(self):
         assert np.allclose(dc, big_hamiltonian_grad)
         assert np.allclose(dc2, big_hamiltonian_grad)
 
+    @pytest.mark.parametrize('opt', [True, False])
+    def test_grad_zero_hamiltonian(self, opt):
+        """Test that the gradient of ExpvalCost is accessible and correct when using observable
+        optimization and the autograd interface with a zero Hamiltonian."""
+        if not qml.tape_mode_active():
+            pytest.skip("This test is only intended for tape mode")
+
+        dev = qml.device("default.qubit", wires=4)
+        hamiltonian = qml.Hamiltonian([0], [qml.PauliX(0)])
+
+        cost = qml.ExpvalCost(
+            qml.templates.StronglyEntanglingLayers, hamiltonian, dev, optimize=opt, diff_method="parameter-shift"
+        )
+
+        w = qml.init.strong_ent_layers_uniform(2, 4, seed=1967)
+
+        dc = qml.grad(cost)(w)
+        assert np.allclose(dc, 0)
+
     def test_optimize_grad_torch(self, torch_support):
         """Test that the gradient of ExpvalCost is accessible and correct when using observable
         optimization and the Torch interface."""
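To close, a short sketch of the two `Hamiltonian`-related fixes exercised by the tests above ([#1082](https://github.com/PennyLaneAI/pennylane/pull/1082)). It is not part of the patch; the coefficients and the 4-wire device are illustrative choices.

```python
import numpy as np
import pennylane as qml

# Arithmetic on Hamiltonians built from tuples/arrays (not just lists) no longer errors.
H1 = qml.Hamiltonian((1.0, 0.5), (qml.PauliX(0), qml.PauliZ(1)))
H2 = qml.Hamiltonian(np.array([0.5]), np.array([qml.PauliX(0)]))
print(H1 + H2)

# A Hamiltonian whose coefficients are all zero now gives a well-defined, zero-valued ExpvalCost.
dev = qml.device("default.qubit", wires=4)
zero_H = qml.Hamiltonian([0], [qml.PauliX(0)])
cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, zero_H, dev)
params = qml.init.strong_ent_layers_uniform(2, 4, seed=1967)
print(cost(params))  # expected to evaluate to 0
```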