Make CI tests faster #1246
@@ -1,2 +1,3 @@
[DEFAULT]
test_path=./test
parallel_class=True
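A note on what the new option does: in stestr, parallel_class=True asks the scheduler to group tests by class when it splits them across worker processes, which keeps class-level setup in a single worker instead of repeating it everywhere. The file name is not visible in this extract, but the keys match stestr's configuration file (conventionally .stestr.conf), so after this change it would read roughly:

# stestr configuration sketch (file name assumed; keys taken from the diff above)
[DEFAULT]
test_path=./test
# Group tests by class so each test class runs in one worker process.
parallel_class=True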
@@ -48,58 +48,42 @@ def setUp(self):
        self.x_plus = xp
        self.test_tol = 0.1

    # pylint: disable=no-member
    def test_end_to_end(self):
    @data(
        (None, None, None),
        (0.0044, None, None),
        (0.04, np.linspace(-4, 4, 31), {"beta": 1.8, "freq": 0.08}),
    )
    @unpack
    def test_end_to_end(self, freq, betas, p0_opt):
        """Test the drag experiment end to end."""

        drag_experiment_helper = DragHelper(gate_name="Drag(xp)")
        if freq:
            drag_experiment_helper.frequency = freq
        backend = MockIQBackend(drag_experiment_helper)

        drag = RoughDrag([1], self.x_plus)
        drag.set_run_options(shots=200)

        if betas is not None:
            drag.set_experiment_options(betas=betas)
        if p0_opt:
            drag.analysis.set_options(p0=p0_opt)

        expdata = drag.run(backend)
        self.assertExperimentDone(expdata)
        result = expdata.analysis_results(1)

        # pylint: disable=no-member
        self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol)
        self.assertEqual(result.quality, "good")

        # Small leakage will make the curves very flat, in this case one should
        # rather increase beta.
        drag_experiment_helper.frequency = 0.0044

        drag = RoughDrag([0], self.x_plus)
        exp_data = drag.run(backend)
        self.assertExperimentDone(exp_data)
        result = exp_data.analysis_results(1)

        # pylint: disable=no-member
        self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol)
        self.assertEqual(result.quality, "good")

        # Large leakage will make the curves oscillate quickly.
        drag_experiment_helper.frequency = 0.04
        drag = RoughDrag([1], self.x_plus, betas=np.linspace(-4, 4, 31))
        # pylint: disable=no-member
        drag.set_run_options(shots=200)
        drag.analysis.set_options(p0={"beta": 1.8, "freq": 0.08})
        exp_data = drag.run(backend)
        self.assertExperimentDone(exp_data)
        result = exp_data.analysis_results(1)

        meas_level = exp_data.metadata["meas_level"]

        self.assertEqual(meas_level, MeasLevel.CLASSIFIED)
        self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol)
        self.assertEqual(result.quality, "good")
        self.assertEqual(expdata.metadata["meas_level"], MeasLevel.CLASSIFIED)

    @data(
        (0.0040, 1.0, 0.00, [1, 3, 5], None, 0.1),  # partial oscillation.
        (0.0020, 0.5, 0.00, [1, 3, 5], None, 0.5),  # even slower oscillation with amp < 1
        (0.0040, 0.8, 0.05, [3, 5, 7], None, 0.1),  # constant offset, i.e. lower SNR.
        (0.0800, 0.9, 0.05, [1, 3, 5], np.linspace(-1, 1, 51), 0.1),  # Beta not in range
        (0.2000, 0.5, 0.10, [1, 3, 5], np.linspace(-2.5, 2.5, 51), 0.1),  # Max closer to zero
        (0.0040, 1.0, 0.00, [1, 3, 5], None, 0.2),  # partial oscillation.
        (0.0020, 0.5, 0.00, [1, 3, 5], None, 1.0),  # even slower oscillation with amp < 1
        (0.0040, 0.8, 0.05, [3, 5, 7], None, 0.2),  # constant offset, i.e. lower SNR.
        (0.0800, 0.9, 0.05, [1, 3, 5], np.linspace(-1, 1, 51), 0.2),  # Beta not in range
        (0.2000, 0.5, 0.10, [1, 3, 5], np.linspace(-2.5, 2.5, 51), 0.2),  # Max closer to zero
    )
    @unpack
    def test_nasty_data(self, freq, amp, offset, reps, betas, tol):
@@ -113,6 +97,7 @@ def test_nasty_data(self, freq, amp, offset, reps, betas, tol):

        drag = RoughDrag([0], self.x_plus, betas=betas)
        drag.set_experiment_options(reps=reps)
        drag.set_run_options(shots=500)

Review comment: I wonder if we should try to refactor MockIQBackend if reducing shots like this makes a difference. We are working with probabilities, so drawing shots from them shouldn't be too slow, but maybe the Result format is just inefficient (lots of nested dictionaries instead of numpy arrays)?

        exp_data = drag.run(backend)
        self.assertExperimentDone(exp_data)
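To illustrate the point in the review comment above: sampling counts from known outcome probabilities is fast with numpy even for far more shots than used here, so the overhead is more plausibly in building the nested Result dictionaries. A minimal sketch, not the actual MockIQBackend code (the function name and data shapes are assumptions):

import numpy as np

def sample_counts(probabilities, shots, seed=1234):
    """Draw a counts dictionary from outcome probabilities.

    probabilities: mapping from bitstring to probability, e.g. {"0": 0.8, "1": 0.2}.
    """
    rng = np.random.default_rng(seed)
    outcomes = list(probabilities)
    # One multinomial draw replaces a Python loop over individual shots.
    draws = rng.multinomial(shots, [probabilities[key] for key in outcomes])
    return {key: int(n) for key, n in zip(outcomes, draws) if n > 0}

# Example: 200 shots from a biased single-qubit distribution.
print(sample_counts({"0": 0.8, "1": 0.2}, shots=200))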
@@ -190,7 +175,7 @@ def test_default_circuits(self):
    def test_circuit_roundtrip_serializable(self):
        """Test circuit serializations for drag experiment."""
        drag = RoughDrag([0], self.x_plus)
        drag.set_experiment_options(reps=[2, 4, 8])
        drag.set_experiment_options(reps=[2, 4], betas=[-5, 5])
        drag.backend = FakeWashingtonV2()
        self.assertRoundTripSerializable(drag._transpiled_circuits())
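The rewritten test_end_to_end above relies on ddt's @data/@unpack to turn one test method into several generated cases, which is what lets the three sequential scenarios collapse into one parametrized test. For reference, a minimal self-contained sketch of the pattern (class name and values are illustrative only):

import unittest
from ddt import ddt, data, unpack

@ddt
class TestParametrized(unittest.TestCase):
    @data(
        (None, None),       # defaults
        (0.0044, None),     # small frequency
        (0.04, [1, 2, 3]),  # large frequency with explicit sweep values
    )
    @unpack
    def test_case(self, freq, betas):
        # Each tuple above becomes its own generated test case; @unpack
        # splits the tuple into the freq and betas arguments.
        self.assertTrue(freq is None or freq > 0)

if __name__ == "__main__":
    unittest.main()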
@@ -160,7 +160,7 @@ def test_experiment_config(self):

    def test_roundtrip_serializable(self):
        """Test round trip JSON serialization"""
        exp = QubitSpectroscopy([1], np.linspace(int(100e6), int(150e6), int(20e6)))
        exp = QubitSpectroscopy([1], np.linspace(int(100e6), int(150e6), 4))

Review comment: 20e6 🤣

        # Checking serialization of the experiment
        self.assertRoundTripSerializable(exp)
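The joke in the comment above points at the real problem fixed by this change: the third argument of np.linspace is the number of samples, not the step size, so the old line built a frequency sweep with 20 million points just for a serialization round trip. A quick demonstration:

import numpy as np

# np.linspace(start, stop, num): num is the number of points, not the spacing.
old = np.linspace(int(100e6), int(150e6), int(20e6))  # 20,000,000 points (~160 MB)
new = np.linspace(int(100e6), int(150e6), 4)          # 4 points

print(old.size)  # 20000000
print(new)       # [1.00000000e+08 1.16666667e+08 1.33333333e+08 1.50000000e+08]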
@@ -270,7 +270,9 @@ def test_parallel_experiment(self):
        par_experiment = ParallelExperiment(
            exp_list, flatten_results=False, backend=parallel_backend
        )
        par_experiment.set_run_options(meas_level=MeasLevel.KERNELED, meas_return="single")
        par_experiment.set_run_options(
            meas_level=MeasLevel.KERNELED, meas_return="single", shots=20
        )

Review comment: This change is fine, but I wonder why we test parallel experiments here. Ideally, each experiment works okay on its own and then there are specific tests for ParallelExperiment. I don't see why individual experiments need to test parallel execution.

Reply: I think you're right. @ItamarGoldman do you think this test is still necessary? Seems like we can remove it since parallel experiments are tested elsewhere.

        par_data = par_experiment.run()
        self.assertExperimentDone(par_data)
@@ -288,7 +290,7 @@ def test_circuit_roundtrip_serializable(self):
        backend = FakeWashingtonV2()
        qubit = 1
        freq01 = BackendData(backend).drive_freqs[qubit]
        frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 21)
        frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 3)
        exp = QubitSpectroscopy([1], frequencies, backend=backend)
        # Checking serialization of the experiment
        self.assertRoundTripSerializable(exp._transpiled_circuits())
Review comment: I tried both gentle options. False will cause the tests to exit earlier when there's a timeout, but the message given is "The following tests exited without returning a status and likely segfaulted or crashed Python", which is cryptic. Since tests are relatively fast now, I think it's okay to use True, which will show the timeout exception on tests that take too long and not stop running tests upon failure.

Reply: Yeah, gentle=False sends SIGALRM to the process without setting a handler, which will kill the process by default. If the process exits before the test worker returns an event for its final status, stestr prints an error saying it never received a status for the test that executed (which is what a segfault looks like to stestr).
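For context on the discussion above: the per-test timeout in stestr-based suites typically comes from the fixtures library, and the gentle flag chooses between raising an exception inside the test and letting SIGALRM kill the worker process. A rough sketch of enabling the gentle behaviour in a base test class (illustrative only, not this project's actual base class; the 60-second budget is an assumption):

import fixtures
import testtools

TEST_TIMEOUT = 60  # seconds; assumed value for illustration

class TimedTestCase(testtools.TestCase):
    def setUp(self):
        super().setUp()
        # gentle=True raises fixtures.TimeoutException inside the test, so a
        # slow test shows up as a normal failure with a readable traceback.
        # gentle=False instead delivers SIGALRM with no handler installed,
        # which kills the process and looks like a crash to stestr.
        self.useFixture(fixtures.Timeout(TEST_TIMEOUT, gentle=True))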