Update gradients module to stop mutating operators in-place #4220

Merged: 35 commits from `grad_mutate` into `master` on Jul 11, 2023

Commits (35)
- f702e04: Adding changes for shift rules (mudit2812, Jun 6, 2023)
- 2d2d0bf: Testing changes (mudit2812, Jun 7, 2023)
- afcd99f: Fixed op indices (mudit2812, Jun 12, 2023)
- 239cd30: Merge branch 'master' into grad_mutate (mudit2812, Jun 12, 2023)
- f221f32: Fixed indexing (mudit2812, Jun 13, 2023)
- e10f87b: Updated `bind_new_parameters` (mudit2812, Jun 13, 2023)
- 5224d70: Updated `tape.get_operation` (mudit2812, Jun 13, 2023)
- 4b30a69: Merge branch 'master' into grad_mutate (mudit2812, Jun 13, 2023)
- df3eeed: Updated tests (mudit2812, Jun 14, 2023)
- 2c80796: Merge branch 'master' into grad_mutate (mudit2812, Jun 14, 2023)
- 51a04da: Test updates; multishifting works (mudit2812, Jun 14, 2023)
- d5bbf9a: Fixing interface (mudit2812, Jun 14, 2023)
- 751a52f: Updated shifting; added dispatch for templates (mudit2812, Jun 14, 2023)
- 9c32bd5: Updated shifting function (mudit2812, Jun 14, 2023)
- 057760d: Merge branch 'master' into grad_mutate (mudit2812, Jun 14, 2023)
- 12c4321: Fixed index error (mudit2812, Jun 14, 2023)
- 8685b96: Removed commented code (mudit2812, Jun 15, 2023)
- 89d7178: [skip ci] Reverted changes to `bind_new_parameters` (mudit2812, Jun 15, 2023)
- d1dd083: Merge branch 'master' into grad_mutate (mudit2812, Jun 23, 2023)
- 23a1932: Merge branch 'master' into grad_mutate (mudit2812, Jul 4, 2023)
- 60ca03b: Update to remove copying (mudit2812, Jul 4, 2023)
- 8eb03df: Update changelog (mudit2812, Jul 4, 2023)
- fe8c725: Merge branch 'master' into grad_mutate (mudit2812, Jul 7, 2023)
- 93a1542: Apply suggestions from code review (mudit2812, Jul 7, 2023)
- 1df562f: Removed unused import (mudit2812, Jul 7, 2023)
- 90749cf: Roll back suggested change (mudit2812, Jul 7, 2023)
- 35fa884: Remove state vector support from `math/quantum.py` (#4322) (eddddddy, Jul 9, 2023)
- a5f8eef: Enable CI and pre-commit hook to lint tests (#4335) (timmysilv, Jul 10, 2023)
- ff85eab: Update broadcasting transforms to use `bind_new_parameters` (#4288) (eddddddy, Jul 10, 2023)
- 64a6a9b: Deprecate X and P (#4330) (lillian542, Jul 10, 2023)
- 73b009e: Support wire labels in `qinfo` transforms (#4331) (eddddddy, Jul 10, 2023)
- 871928a: Deprecations for 0.32 from me! (#4316) (timmysilv, Jul 10, 2023)
- db1a5f1: Merge branch 'master' into grad_mutate (mudit2812, Jul 10, 2023)
- 031f1a3: trigger ci (mudit2812, Jul 10, 2023)
- 084f515: trigger ci (mudit2812, Jul 11, 2023)
5 changes: 5 additions & 0 deletions doc/releases/changelog-dev.md
@@ -6,6 +6,10 @@

<h3>Improvements 🛠</h3>

* The `qml.gradients` module no longer mutates operators in-place for any gradient transforms.
Instead, operators that need to be mutated are copied with new parameters.
[(#4220)](https://github.com/PennyLaneAI/pennylane/pull/4220)

* `PauliWord` sparse matrices are much faster, which directly improves `PauliSentence`.
[#4272](https://github.com/PennyLaneAI/pennylane/pull/4272)

@@ -52,4 +56,5 @@ This release contains contributions from (in alphabetical order):

Christina Lee,
Borja Requena,
Mudit Pandey,
Matthew Silverman
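
To make the changelog entry concrete, here is a minimal sketch (not code from the PR itself) of the non-mutating pattern the gradients module now follows: rather than writing shifted values back into an operator's `data`, a fresh operator is built with `bind_new_parameters`, the helper this PR imports in `general_shift_rules.py`.

```python
import pennylane as qml
from pennylane.ops.functions import bind_new_parameters

op = qml.RX(0.5, wires=0)

# Previously, a gradient transform could, in effect, write the shifted value
# back into the operator, mutating it in-place. The non-mutating pattern
# creates a new, independent operator with the shifted parameter instead:
shifted = bind_new_parameters(op, [0.5 + 0.1])

print(op.data)       # original parameter is untouched
print(shifted.data)  # holds the shifted value 0.6
```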
76 changes: 47 additions & 29 deletions pennylane/gradients/general_shift_rules.py
@@ -20,6 +20,9 @@

import numpy as np
import pennylane as qml
from pennylane.measurements import MeasurementProcess
from pennylane.ops.functions import bind_new_parameters
from pennylane.tape import QuantumScript


def process_shifts(rule, tol=1e-10, batch_duplicates=True):
@@ -378,6 +381,44 @@ def generate_multi_shift_rule(frequencies, shifts=None, orders=None):
return _combine_shift_rules(rules)


def _copy_and_shift_params(tape, indices, shifts, multipliers, cast=False):
"""Create a copy of a tape and of parameters, and set the new tape to the parameters
rescaled and shifted as indicated by ``indices``, ``multipliers`` and ``shifts``."""
all_ops = tape.circuit

for idx, shift, multiplier in zip(indices, shifts, multipliers):
_, op_idx, p_idx = tape.get_operation(idx)
op = (
all_ops[op_idx].obs
if isinstance(all_ops[op_idx], MeasurementProcess)
else all_ops[op_idx]
)

# Shift copied parameter
new_params = list(op.data)
new_params[p_idx] = new_params[p_idx] * qml.math.convert_like(multiplier, new_params[p_idx])
new_params[p_idx] = new_params[p_idx] + qml.math.convert_like(shift, new_params[p_idx])
if cast:
dtype = getattr(new_params[p_idx], "dtype", float)
new_params[p_idx] = qml.math.cast(new_params[p_idx], dtype)

# Create operator with shifted parameter and put into shifted tape
shifted_op = bind_new_parameters(op, new_params)
if op_idx < len(tape.operations):
all_ops[op_idx] = shifted_op
else:
mp = all_ops[op_idx].__class__
all_ops[op_idx] = mp(obs=shifted_op)

# pylint: disable=protected-access
prep = all_ops[: len(tape._prep)]
ops = all_ops[len(tape._prep) : len(tape.operations)]
meas = all_ops[len(tape.operations) :]
shifted_tape = QuantumScript(ops=ops, measurements=meas, prep=prep, shots=tape.shots)

return shifted_tape


def generate_shifted_tapes(tape, index, shifts, multipliers=None, broadcast=False):
r"""Generate a list of tapes or a single broadcasted tape, where one marked
trainable parameter has been shifted by the provided shift values.
@@ -403,27 +444,14 @@ def generate_shifted_tapes(tape, index, shifts, multipliers=None, broadcast=False):
the ``batch_size`` of the returned tape matches the length of ``shifts``.
"""

def _copy_and_shift_params(tape, params, idx, shift, mult):
"""Create a copy of a tape and of parameters, and set the new tape to the parameters
rescaled and shifted as indicated by ``idx``, ``mult`` and ``shift``."""
new_params = params.copy()
new_params[idx] = new_params[idx] * qml.math.convert_like(
mult, new_params[idx]
) + qml.math.convert_like(shift, new_params[idx])

shifted_tape = tape.copy(copy_operations=True)
shifted_tape.set_parameters(new_params)
return shifted_tape

params = list(tape.get_parameters())
if multipliers is None:
multipliers = np.ones_like(shifts)

if broadcast:
return (_copy_and_shift_params(tape, params, index, shifts, multipliers),)
return (_copy_and_shift_params(tape, [index], [shifts], [multipliers]),)

return tuple(
_copy_and_shift_params(tape, params, index, shift, multiplier)
_copy_and_shift_params(tape, [index], [shift], [multiplier])
for shift, multiplier in zip(shifts, multipliers)
)

@@ -450,22 +478,12 @@ def generate_multishifted_tapes(tape, indices, shifts, multipliers=None):
of tapes will match the summed lengths of all inner sequences in ``shifts``
and ``multipliers`` (if provided).
"""
params = list(tape.get_parameters())
if multipliers is None:
multipliers = np.ones_like(shifts)

tapes = []

for _shifts, _multipliers in zip(shifts, multipliers):
new_params = params.copy()
shifted_tape = tape.copy(copy_operations=True)
for idx, shift, multiplier in zip(indices, _shifts, _multipliers):
dtype = getattr(new_params[idx], "dtype", float)
new_params[idx] = new_params[idx] * qml.math.convert_like(multiplier, new_params[idx])
new_params[idx] = new_params[idx] + qml.math.convert_like(shift, new_params[idx])
new_params[idx] = qml.math.cast(new_params[idx], dtype)

shifted_tape.set_parameters(new_params)
tapes.append(shifted_tape)
tapes = [
_copy_and_shift_params(tape, indices, _shifts, _multipliers, cast=True)
for _shifts, _multipliers in zip(shifts, multipliers)
]

return tapes
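
As a usage sketch (assumed behavior based on the diff above, not code taken from the PR), both `generate_shifted_tapes` and `generate_multishifted_tapes` now route through the shared `_copy_and_shift_params` helper, so the returned tapes carry freshly bound operators while the input tape is left unchanged:

```python
import numpy as np
import pennylane as qml
from pennylane.gradients.general_shift_rules import generate_shifted_tapes
from pennylane.tape import QuantumScript

tape = QuantumScript([qml.RX(0.3, wires=0)], [qml.expval(qml.PauliZ(0))])
tape.trainable_params = [0]

# Two-term parameter-shift rule: evaluate at theta +/- pi/2.
tapes = generate_shifted_tapes(tape, 0, shifts=[np.pi / 2, -np.pi / 2])

print([t.get_parameters() for t in tapes])  # shifted copies: 0.3 +/- pi/2
print(tape.get_parameters())                # [0.3]; the original tape is untouched
```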
4 changes: 3 additions & 1 deletion pennylane/tape/qscript.py
@@ -428,10 +428,12 @@ def _update_par_info(self):
{"op": op, "op_idx": idx, "p_idx": i} for i, d in enumerate(op.data)
)

n_ops = len(self.operations)
for idx, m in enumerate(self.measurements):
if m.obs is not None:
self._par_info.extend(
{"op": m.obs, "op_idx": idx, "p_idx": i} for i, d in enumerate(m.obs.data)
{"op": m.obs, "op_idx": idx + n_ops, "p_idx": i}
for i, d in enumerate(m.obs.data)
)

def _update_trainable_params(self):
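
The `_update_par_info` change above makes an observable's `op_idx` a global index into `tape.circuit` (operations followed by measurements) rather than an index into the measurement list alone, which is what lets `_copy_and_shift_params` index `tape.circuit` directly. A small sketch of the assumed behavior:

```python
import numpy as np
import pennylane as qml
from pennylane.tape import QuantumScript

A = np.diag([1.0, -1.0])  # matrix parameter owned by the observable
qs = QuantumScript(
    ops=[qml.RX(0.1, wires=0), qml.RY(0.2, wires=0)],
    measurements=[qml.expval(qml.Hermitian(A, wires=0))],
)

# Parameters 0 and 1 belong to the gates; parameter 2 is the Hermitian matrix.
_, op_idx, p_idx = qs.get_operation(2)
print(op_idx, p_idx)  # 2 0 -- qs.circuit[2] is the measurement holding the obs
```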
4 changes: 2 additions & 2 deletions tests/legacy/test_qscript_old.py
@@ -186,7 +186,7 @@ def test_update_par_info_update_trainable_params(self):
assert p_i[4] == {"op": ops[2], "op_idx": 2, "p_idx": 0}
assert p_i[5] == {"op": ops[3], "op_idx": 3, "p_idx": 0}
assert p_i[6] == {"op": ops[3], "op_idx": 3, "p_idx": 1}
assert p_i[7] == {"op": m[0].obs, "op_idx": 0, "p_idx": 0}
assert p_i[7] == {"op": m[0].obs, "op_idx": 4, "p_idx": 0}

assert qs._trainable_params == list(range(8))

@@ -224,7 +224,7 @@ def test_get_operation(self):
assert op_6 == ops[4] and op_id_6 == 4 and p_id_6 == 1

_, obs_id_0, p_id_0 = qs.get_operation(7)
assert obs_id_0 == 0 and p_id_0 == 0
assert obs_id_0 == 5 and p_id_0 == 0

def test_update_observables(self):
"""This method needs to be more thoroughly tested, and probably even reconsidered in
4 changes: 2 additions & 2 deletions tests/tape/test_qscript.py
@@ -183,7 +183,7 @@ def test_update_par_info_update_trainable_params(self):
assert p_i[4] == {"op": ops[2], "op_idx": 2, "p_idx": 0}
assert p_i[5] == {"op": ops[3], "op_idx": 3, "p_idx": 0}
assert p_i[6] == {"op": ops[3], "op_idx": 3, "p_idx": 1}
assert p_i[7] == {"op": m[0].obs, "op_idx": 0, "p_idx": 0}
assert p_i[7] == {"op": m[0].obs, "op_idx": 4, "p_idx": 0}

assert qs._trainable_params == list(range(8))

@@ -222,7 +222,7 @@ def test_get_operation(self):
assert op_6 == ops[4] and op_id_6 == 4 and p_id_6 == 1

_, obs_id_0, p_id_0 = qs.get_operation(7)
assert obs_id_0 == 0 and p_id_0 == 0
assert obs_id_0 == 5 and p_id_0 == 0

def test_update_observables(self):
"""This method needs to be more thoroughly tested, and probably even reconsidered in