Merge branch 'master' into hessian_autograd
josh146 committed Mar 26, 2021
2 parents fedbdeb + ede35f4 commit 8745c6f
Showing 24 changed files with 1,156 additions and 121 deletions.
113 changes: 113 additions & 0 deletions .github/CHANGELOG.md
@@ -61,6 +61,93 @@
[-0.3826, -0.1124]]])
```

* The TensorFlow interface now supports computing second derivatives and Hessians of hybrid quantum models.
Second derivatives are supported on both hardware and simulators.
[(#1110)](https://github.com/PennyLaneAI/pennylane/pull/1110)

```python
import tensorflow as tf
import pennylane as qml

dev = qml.device('default.qubit', wires=1)

@qml.qnode(dev, interface='tf', diff_method='parameter-shift')
def circuit(x):
    qml.RX(x[0], wires=0)
    qml.RY(x[1], wires=0)
    return qml.expval(qml.PauliZ(0))

x = tf.Variable([0.1, 0.2], dtype=tf.float64)

with tf.GradientTape() as tape1:
    with tf.GradientTape() as tape2:
        y = circuit(x)

    grad = tape2.gradient(y, x)

hessian = tape1.jacobian(grad, x)
```

To compute just the diagonal of the Hessian, the gradient of the
first derivatives can be taken:

```python
hessian_diagonals = tape1.gradient(grad, x)
```
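For this circuit `<Z> = cos(x0) * cos(x1)`, so the result is easy to sanity-check analytically: both diagonal entries of the Hessian equal `-cos(x0) * cos(x1)`. A quick check (illustrative only, not part of the original entry):

```python
import numpy as np

# Both diagonal entries of the Hessian of <Z> = cos(x0) * cos(x1):
print(-np.cos(0.1) * np.cos(0.2))  # approximately -0.9752
```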

* Adds a new transform `qml.ctrl` that creates controlled versions of subroutines.
[(#1157)](https://github.com/PennyLaneAI/pennylane/pull/1157)

Here's a simple usage example:

```python
def my_ansatz(params):
    qml.RX(params[0], wires=0)
    qml.RZ(params[1], wires=1)

# Create a new function that applies `my_ansatz`,
# controlled by the "2" wire.
my_ansatz2 = qml.ctrl(my_ansatz, control=2)

@qml.qnode(...)
def circuit(params):
    my_ansatz2(params)
    return qml.state()
```

The above `circuit` would be equivalent to:

```python
@qml.qnode(...)
def circuit(params):
    qml.CRX(params[0], wires=[2, 0])
    qml.CRZ(params[1], wires=[2, 1])
    return qml.state()
```

The `qml.ctrl` transform is especially useful when an operation must be applied repeatedly,
controlled by a different qubit in each repetition. A famous example is Shor's algorithm:

```python
def modmul(a, mod, wires):
    # Some complex set of gates that implements modular multiplication:
    # qml.CNOT(...); qml.Toffoli(...); ...
    ...

@qml.qnode(...)
def shor(a, mod, scratch_wires, qft_wires):
    for i, wire in enumerate(qft_wires):
        qml.Hadamard(wire)

        # Create the controlled modular multiplication
        # subroutine based on the control wire.
        cmodmul = qml.ctrl(modmul, control=wire)

        # Execute the controlled modular multiplication.
        cmodmul(a ** i, mod, scratch_wires)

    qml.adjoint(qml.QFT)(qft_wires)
    return qml.sample()
```

In the future, devices will be able to exploit the sparsity of controlled operations to
improve simulation performance.

* Adds a new optimizer `qml.ShotAdaptiveOptimizer`, a gradient-descent optimizer where
the shot rate is adaptively calculated using the variances of the parameter-shift gradient.
[(#1139)](https://github.com/PennyLaneAI/pennylane/pull/1139)
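A minimal usage sketch (hedged: the constructor and `step` signature follow the PR, while the Hamiltonian, ansatz, and shot numbers here are illustrative placeholders):

```python
import pennylane as qml
import numpy as np

dev = qml.device("default.qubit", wires=2, shots=100)

H = qml.Hamiltonian([2.0, 4.0], [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)])
cost = qml.ExpvalCost(qml.templates.StronglyEntanglingLayers, H, dev)
params = np.random.uniform(size=(2, 2, 3))  # (n_layers, n_wires, 3) weights

opt = qml.ShotAdaptiveOptimizer(min_shots=10)
for _ in range(20):
    params = opt.step(cost, params)  # the optimizer adjusts the shot budget each step
```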
@@ -398,6 +485,32 @@

<h3>Improvements</h3>

* Edited the `MottonenStatePreparation` template to improve performance on states with only real
amplitudes by reducing the number of redundant CNOT gates at the end of a circuit.

```python
import numpy as np
import pennylane as qml
from pennylane.templates.state_preparations import mottonen

dev = qml.device("default.qubit", wires=2)

inputstate = [np.sqrt(0.2), np.sqrt(0.3), np.sqrt(0.4), np.sqrt(0.1)]

@qml.qnode(dev)
def circuit():
    mottonen.MottonenStatePreparation(inputstate, wires=[0, 1])
    return qml.expval(qml.PauliZ(0))
```
Previously returned:
```pycon
>>> print(qml.draw(circuit)())
0: ──RY(1.57)──╭C─────────────╭C──╭C──╭C──┤ ⟨Z⟩
1: ──RY(1.35)──╰X──RY(0.422)──╰X──╰X──╰X──┤
```
Now returns:
```pycon
>>> print(qml.draw(circuit)())
0: ──RY(1.57)──╭C─────────────╭C──┤ ⟨Z⟩
1: ──RY(1.35)──╰X──RY(0.422)──╰X──┤
```

* The `QAOAEmbedding` and `BasicEntanglerLayers` are now classes inheriting
from `Operation`, and define the ansatz in their `expand()` method. This
change does not affect the user interface.
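Since these templates are now `Operation` subclasses, their tape decomposition can be inspected directly. A small sketch (the weight shape `(n_layers, n_wires)` is assumed from the template's documentation):

```python
import pennylane as qml
import numpy as np

weights = np.array([[0.1, 0.2]])  # one layer on two wires
op = qml.templates.BasicEntanglerLayers(weights, wires=[0, 1])
tape = op.expand()  # a QuantumTape containing the layer's rotation and CNOT gates
print(tape.operations)
```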
2 changes: 1 addition & 1 deletion doc/code/qml_templates.rst
@@ -77,4 +77,4 @@ Utility functions for quantum Monte Carlo
.. automodapi:: pennylane.templates.subroutines.qmc
:no-heading:
:no-main-docstr:
- :skip: QuantumMonteCarlo, template
+ :skip: QuantumMonteCarlo, Operation, Wires
4 changes: 3 additions & 1 deletion pennylane/__init__.py
@@ -40,7 +40,9 @@
from pennylane.optimize import *
from pennylane.qnode import QNode, qnode
from pennylane.templates import broadcast, layer, template
- from pennylane.transforms import adjoint, draw, measurement_grouping, metric_tensor
+ from pennylane.transforms import draw, measurement_grouping, metric_tensor
+ from pennylane.transforms.adjoint import adjoint
+ from pennylane.transforms.control import ctrl, ControlledOperation
from pennylane.utils import inv
from pennylane.vqe import ExpvalCost, Hamiltonian, VQECost

1 change: 1 addition & 0 deletions pennylane/devices/default_qubit_jax.py
@@ -142,6 +142,7 @@ def circuit():
"CRX": jax_ops.CRX,
"CRY": jax_ops.CRY,
"CRZ": jax_ops.CRZ,
"CRot": jax_ops.CRot,
"MultiRZ": jax_ops.MultiRZ,
"SingleExcitation": jax_ops.SingleExcitation,
"SingleExcitationPlus": jax_ops.SingleExcitationPlus,
3 changes: 3 additions & 0 deletions pennylane/devices/tests/conftest.py
@@ -223,6 +223,9 @@ def pytest_generate_tests(metafunc):
# translate command line string to None if necessary
device_kwargs["shots"] = None if (opt.shots == "None") else int(opt.shots)

+ # store user defined device kwargs
+ device_kwargs.update(opt.device_kwargs)

list_of_device_kwargs.append(device_kwargs)

# define the device_kwargs parametrization:
82 changes: 68 additions & 14 deletions pennylane/interfaces/tf.py
@@ -138,34 +138,88 @@ def _execute(self, params, **input_kwargs):
res = self.execute_device(args, input_kwargs["device"])
self.set_parameters(all_params, trainable_only=False)

- def grad(grad_output, **tfkwargs):
-     variables = tfkwargs.get("variables", None)
+ # The following dictionary caches the Jacobian and Hessian matrices,
+ # so that they can be re-used for different vjp/vhp computations
+ # within the same backpropagation call.
+ # This dictionary is tied to an instance of the inner function jacobian_product
+ # called within tf_tape.gradient or tf_tape.jacobian, via closure.
+ # Once tf_tape.gradient/jacobian has returned, the jacobian_product instance
+ # will no longer be in scope and the memory will be freed.
+ saved_grad_matrices = {}
+
+ def _evaluate_grad_matrix(grad_matrix_fn):
+     """Convenience function for generating gradient matrices
+     for the given parameter values.
+
+     This function serves two purposes:
+
+     * Avoids duplicating logic surrounding parameter unwrapping/wrapping.
+
+     * Takes advantage of closure, to cache computed gradient matrices via
+       the ``saved_grad_matrices`` dictionary, to avoid gradient matrices
+       being computed multiple redundant times. This is particularly useful
+       when differentiating vector-valued QNodes. Because TensorFlow requests
+       the vector-grad matrix product, and *not* the full grad matrix,
+       differentiating vector-valued functions will result in multiple
+       backward passes.
+
+     Args:
+         grad_matrix_fn (str): Name of the gradient matrix function. Should
+             correspond to an existing tape method. Currently allowed values
+             include ``"jacobian"`` and ``"hessian"``.
+
+     Returns:
+         array[float]: the gradient matrix
+     """
+     if grad_matrix_fn in saved_grad_matrices:
+         return saved_grad_matrices[grad_matrix_fn]

      self.set_parameters(all_params_unwrapped, trainable_only=False)
-     jacobian = self.jacobian(input_kwargs["device"], params=args, **self.jacobian_options)
+     grad_matrix = getattr(self, grad_matrix_fn)(
+         input_kwargs["device"], params=args, **self.jacobian_options
+     )
      self.set_parameters(all_params, trainable_only=False)

-     jacobian = tf.constant(jacobian, dtype=self.dtype)
+     grad_matrix = tf.constant(grad_matrix, dtype=self.dtype)
+     saved_grad_matrices[grad_matrix_fn] = grad_matrix
+
+     return grad_matrix
+
+ def jacobian_product(dy, **tfkwargs):
+     variables = tfkwargs.get("variables", None)
+     dy_row = tf.reshape(dy, [1, -1])
+
+     @tf.custom_gradient
+     def jacobian(p):
+         def hessian_product(ddy, **tfkwargs):
+             variables = tfkwargs.get("variables", None)
+             hessian = _evaluate_grad_matrix("hessian")
+
+             if self.output_dim == 1:
+                 hessian = tf.expand_dims(hessian, -1)
+
+             vhp = tf.cond(
+                 tf.rank(hessian) > 2,
+                 lambda: dy_row @ ddy @ hessian @ tf.transpose(dy_row),
+                 lambda: ddy @ hessian,
+             )
+
+             vhp = tf.unstack(tf.reshape(vhp, [-1]))
+             return (vhp, variables) if variables is not None else vhp
+
+         return _evaluate_grad_matrix("jacobian"), hessian_product

-     # Reshape gradient output array as a 2D row-vector.
-     grad_output_row = tf.reshape(grad_output, [1, -1])
-
-     # Calculate the vector-Jacobian matrix product, and unstack the output.
-     grad_input = tf.matmul(grad_output_row, jacobian)
-     grad_input = tf.unstack(tf.reshape(grad_input, [-1]))
-
-     if variables is not None:
-         return grad_input, variables
-
-     return grad_input
+     vjp = tf.matmul(dy_row, jacobian(params))
+     vjp = tf.unstack(tf.reshape(vjp, [-1]))
+     return (vjp, variables) if variables is not None else vjp

  if self.is_sampled:
-     return res, grad
+     return res, jacobian_product

  if res.dtype == np.dtype("object"):
      res = np.hstack(res)

- return tf.convert_to_tensor(res, dtype=self.dtype), grad
+ return tf.convert_to_tensor(res, dtype=self.dtype), jacobian_product

@classmethod
def apply(cls, tape, dtype=tf.float64):
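The heart of the `tf.py` change above is a gradient function that is itself decorated with `tf.custom_gradient`, so that TensorFlow can differentiate the vector-Jacobian product a second time and obtain vector-Hessian products. A stripped-down sketch of the same pattern on a toy classical function (the names and function are illustrative, not PennyLane code):

```python
import tensorflow as tf

@tf.custom_gradient
def square(x):
    def first_grad(dy):
        @tf.custom_gradient
        def vjp(x_inner):
            def second_grad(ddy):
                return ddy * dy * 2.0  # derivative of dy * 2 * x with respect to x
            return dy * 2.0 * x_inner, second_grad
        return vjp(x)
    return x * x, first_grad

x = tf.constant(3.0)

with tf.GradientTape() as tape1:
    tape1.watch(x)
    with tf.GradientTape() as tape2:
        tape2.watch(x)
        y = square(x)
    grad = tape2.gradient(y, x)    # 2x = 6.0

hessian = tape1.gradient(grad, x)  # 2.0
```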
7 changes: 7 additions & 0 deletions pennylane/interfaces/torch.py
@@ -94,6 +94,13 @@ def _evaluate_grad_matrix(grad_matrix_fn):
Because PyTorch requests the vector-GradMatrix product,
and *not* the full GradMatrix, differentiating vector-valued
functions will result in multiple backward passes.
+ Args:
+     grad_matrix_fn (str): Name of the gradient matrix function. Should correspond
+         to an existing tape method. Currently allowed values include
+         ``"jacobian"`` and ``"hessian"``.
+
+ Returns:
+     array[float]: the gradient matrix
"""
if grad_matrix_fn in ctx.saved_grad_matrices:
return ctx.saved_grad_matrices[grad_matrix_fn]
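The docstring's point about multiple backward passes can be seen in plain PyTorch: a full Jacobian of a vector-valued function is assembled from one VJP per output entry, which is why the computed matrices are cached in `ctx.saved_grad_matrices`. A self-contained sketch (toy function, no PennyLane involved):

```python
import torch

x = torch.tensor([0.1, 0.2], requires_grad=True)
y = torch.stack([x[0] * x[1], x[0] ** 2])  # vector-valued output

rows = []
for i in range(y.shape[0]):
    v = torch.zeros_like(y)
    v[i] = 1.0  # basis vector selecting one output entry
    (row,) = torch.autograd.grad(y, x, grad_outputs=v, retain_graph=True)
    rows.append(row)

jacobian = torch.stack(rows)  # each backward pass yields one Jacobian row
```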
2 changes: 1 addition & 1 deletion pennylane/operation.py
@@ -623,7 +623,7 @@ def expand(self):
operations decomposition, or if not implemented, simply
the operation itself.
"""
- tape = qml.tape.QuantumTape()
+ tape = qml.tape.QuantumTape(do_queue=False)

with tape:
self.decomposition(*self.data, wires=self.wires)