diff --git a/runcards/coherence_flux.py b/runcards/coherence_flux.py
new file mode 100644
index 000000000..abefa73ce
--- /dev/null
+++ b/runcards/coherence_flux.py
@@ -0,0 +1,201 @@
+from argparse import ArgumentParser
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+from qibolab import create_platform
+
+from qibocal.auto.execute import Executor
+from qibocal.cli.report import report
+from qibocal.protocols.flux_dependence.utils import (
+    transmon_frequency,
+    transmon_readout_frequency,
+)
+
+biases = np.arange(-0.025, 0.025, 0.002)
+"""Bias points to sweep."""
+
+# Qubit spectroscopy
+freq_width = 10_000_000
+"""Width [Hz] for frequency sweep relative to the qubit frequency."""
+freq_step = 500_000
+"""Frequency [Hz] step for sweep."""
+drive_duration = 1000
+"""Drive pulse duration [ns]. Same for all qubits."""
+
+# Rabi amp signal
+min_amp_factor = 0.0
+"""Minimum amplitude multiplicative factor."""
+max_amp_factor = 1.5
+"""Maximum amplitude multiplicative factor."""
+step_amp_factor = 0.01
+"""Step amplitude multiplicative factor."""
+
+# Flipping
+nflips_max = 200
+"""Maximum number of flips ([RX(pi) - RX(pi)] sequences)."""
+nflips_step = 10
+"""Flip step."""
+
+# T1 signal
+delay_before_readout_start = 16
+"""Initial delay before readout [ns]."""
+delay_before_readout_end = 100_000
+"""Final delay before readout [ns]."""
+delay_before_readout_step = 4_000
+"""Step delay before readout [ns]."""
+
+# Ramsey signal
+detuning = 3_000_000
+"""Frequency detuning [Hz]."""
+delay_between_pulses_start = 16
+"""Initial delay between RX(pi/2) pulses in ns."""
+delay_between_pulses_end = 1_000
+"""Final delay between RX(pi/2) pulses in ns."""
+delay_between_pulses_step = 5
+"""Step delay between RX(pi/2) pulses in ns."""
+
+# T2 and Ramsey signal
+delay_between_pulses_start_T2 = 16
+"""Initial delay between RX(pi/2) pulses in ns."""
+delay_between_pulses_end_T2 = 80_000
+"""Final delay between RX(pi/2) pulses in ns."""
+delay_between_pulses_step_T2 = 500
+"""Step delay between RX(pi/2) pulses in ns."""
+single_shot_T2: bool = False
+"""If ``True`` save single shot signal data."""
+
+# Optional qubit spectroscopy
+drive_amplitude: Optional[float] = 0.1
+"""Drive pulse amplitude (optional). Same for all qubits."""
+hardware_average: bool = True
+"""By default hardware average will be performed."""
+# Optional rabi amp signal
+pulse_length: Optional[float] = 40
+"""RX pulse duration [ns]."""
+# Optional T1 signal
+single_shot_T1: bool = False
+"""If ``True`` save single shot signal data."""
+
+
+parser = ArgumentParser()
+parser.add_argument("--target", nargs="+", required=True, help="Target qubit")
+parser.add_argument("--platform", type=str, required=True, help="Platform name")
+parser.add_argument(
+    "--path", type=str, default="TTESTCoherenceFlux1", help="Path for the output"
+)
+args = parser.parse_args()
+
+targets = args.target
+path = args.path
+
+
+fit_function = transmon_frequency
+platform = create_platform(args.platform)
+
+for target in targets:
+    params_qubit = {
+        "w_max": platform.qubits[target].drive_frequency,
+        "xj": 0,
+        "d": platform.qubits[target].asymmetry,
+        "normalization": platform.qubits[target].crosstalk_matrix[target],
+        "offset": -platform.qubits[target].sweetspot
+        * platform.qubits[target].crosstalk_matrix[target],
+        "crosstalk_element": 1,
+        "charging_energy": platform.qubits[target].Ec,
+    }
+
+    # NOTE: Center around the sweetspot
+    centered_biases = biases + platform.qubits[target].sweetspot
+
+    for i, bias in enumerate(centered_biases):
+        with Executor.open(
+            f"myexec_{i}",
+            path=args.path / Path(f"flux_{bias}"),
+            platform=args.platform,
+            targets=[target],
+            update=True,
+            force=True,
+        ) as e:
+
+            # Change the flux
+            e.platform.qubits[target].flux.offset = bias
+
+            # Change the qubit frequency
+            qubit_frequency = fit_function(bias, **params_qubit)  # * 1e9
+
+            res_frequency = transmon_readout_frequency(
+                bias,
+                **params_qubit,
+                g=platform.qubits[target].g,
+                resonator_freq=platform.qubits[target].bare_resonator_frequency,
+            )
+            e.platform.qubits[target].drive_frequency = qubit_frequency
+            e.platform.qubits[target].native_gates.RX.frequency = qubit_frequency
+
+            res_spectroscopy_output = e.resonator_spectroscopy(
+                freq_width=freq_width,
+                freq_step=freq_step,
+                power_level="low",
+                relaxation_time=2000,
+                nshots=1024,
+            )
+            rabi_output = e.rabi_amplitude_signal(
+                min_amp_factor=min_amp_factor,
+                max_amp_factor=max_amp_factor,
+                step_amp_factor=step_amp_factor,
+                pulse_length=e.platform.qubits[target].native_gates.RX.duration,
+            )
+
+            if rabi_output.results.amplitude[target] > 0.5:
+                print(
+                    f"Rabi fit has pi pulse amplitude {rabi_output.results.amplitude[target]}, which exceeds the maximum of 0.5 allowed for QM. Skipping to next bias point."
+ ) + e.platform.qubits[target].native_gates.RX.amplitude = ( + 0.5 # FIXME: For QM this should be 0.5 + ) + + report(e.path, e.history) + continue + + classification_output = e.single_shot_classification(nshots=5000) + ramsey_output = e.ramsey( + delay_between_pulses_start=delay_between_pulses_start, + delay_between_pulses_end=delay_between_pulses_end, + delay_between_pulses_step=delay_between_pulses_step, + detuning=detuning, + ) + + ramsey_output = e.ramsey( + delay_between_pulses_start=delay_between_pulses_start, + delay_between_pulses_end=delay_between_pulses_end, + delay_between_pulses_step=delay_between_pulses_step, + detuning=detuning, + ) + classification_output = e.single_shot_classification(nshots=5000) + t1_output = e.t1( + delay_before_readout_start=delay_before_readout_start, + delay_before_readout_end=delay_before_readout_end, + delay_before_readout_step=delay_before_readout_step, + single_shot=single_shot_T1, + ) + + ramsey_t2_output = e.ramsey( + delay_between_pulses_start=delay_between_pulses_start_T2, + delay_between_pulses_end=delay_between_pulses_end_T2, + delay_between_pulses_step=delay_between_pulses_step_T2, + detuning=0, + ) + readout_characterization_out = e.readout_characterization( + delay=1000, + nshots=5000, + ) + rb_out = e.rb_ondevice( + num_of_sequences=10000, + max_circuit_depth=1000, + delta_clifford=20, + n_avg=1, + save_sequences=False, + apply_inverse=True, + ) + report(e.path, e.history) diff --git a/src/qibocal/auto/operation.py b/src/qibocal/auto/operation.py index 8b1f602eb..1d7527ae8 100644 --- a/src/qibocal/auto/operation.py +++ b/src/qibocal/auto/operation.py @@ -181,7 +181,7 @@ def load_data(path: Path, filename: str): """Load data stored in a npz file.""" file = path / f"{filename}.npz" if file.is_file(): - raw_data_dict = dict(np.load(file)) + raw_data_dict = dict(np.load(file, allow_pickle=True)) data_dict = {} for data_key, array in raw_data_dict.items(): diff --git a/src/qibocal/auto/output.py b/src/qibocal/auto/output.py index 275c4cfd8..6247273e7 100644 --- a/src/qibocal/auto/output.py +++ b/src/qibocal/auto/output.py @@ -211,7 +211,6 @@ def update_platform(platform: Platform, path: Path): platpath = path / PLATFORM if platpath.is_dir(): platpath = path / UPDATED_PLATFORM - platpath.mkdir(parents=True, exist_ok=True) dump_platform(platform, platpath) diff --git a/src/qibocal/cli/report.py b/src/qibocal/cli/report.py index b6c4e6e20..db6ebb54d 100644 --- a/src/qibocal/cli/report.py +++ b/src/qibocal/cli/report.py @@ -44,14 +44,17 @@ def plotter( completed node on specific target. 
""" figures, fitting_report = generate_figures_and_report(node, target) - buffer = io.StringIO() - html_list = [] - for figure in figures: - figure.write_html(buffer, include_plotlyjs=False, full_html=False) - buffer.seek(0) - html_list.append(buffer.read()) - buffer.close() - all_html = "".join(html_list) + if isinstance(figures[0], str): + all_html = "".join(figures) + else: + buffer = io.StringIO() + html_list = [] + for figure in figures: + figure.write_html(buffer, include_plotlyjs=False, full_html=False) + buffer.seek(0) + html_list.append(buffer.read()) + buffer.close() + all_html = "".join(html_list) return all_html, fitting_report diff --git a/src/qibocal/protocols/__init__.py b/src/qibocal/protocols/__init__.py index 63ff0687c..0d3859322 100644 --- a/src/qibocal/protocols/__init__.py +++ b/src/qibocal/protocols/__init__.py @@ -27,6 +27,7 @@ from .flux_dependence.qubit_flux_tracking import qubit_flux_tracking from .flux_dependence.resonator_crosstalk import resonator_crosstalk from .flux_dependence.resonator_flux_dependence import resonator_flux +from .qua import rb_ondevice from .qubit_power_spectroscopy import qubit_power_spectroscopy from .qubit_spectroscopy import qubit_spectroscopy from .qubit_spectroscopy_ef import qubit_spectroscopy_ef @@ -146,6 +147,7 @@ "rabi_length_frequency", "rabi_length_frequency_signal", "standard_rb_2q", + "rb_ondevice", "standard_rb_2q_inter", "optimize_two_qubit_gate", ] diff --git a/src/qibocal/protocols/qua/__init__.py b/src/qibocal/protocols/qua/__init__.py new file mode 100644 index 000000000..deeb29b40 --- /dev/null +++ b/src/qibocal/protocols/qua/__init__.py @@ -0,0 +1 @@ +from .rb_ondevice import rb_ondevice diff --git a/src/qibocal/protocols/qua/configuration.py b/src/qibocal/protocols/qua/configuration.py new file mode 100644 index 000000000..d457e3958 --- /dev/null +++ b/src/qibocal/protocols/qua/configuration.py @@ -0,0 +1,160 @@ +from dataclasses import asdict + +from qibolab.instruments.qm import QMController +from qibolab.instruments.qm.config import QMConfig + +NATIVE_OPS = { + "x180": lambda q: (f"plus_i_{q}", f"plus_q_{q}"), + "y180": lambda q: (f"minus_q_{q}", f"plus_i_{q}"), + "x90": lambda q: (f"plus_i_half_{q}", f"plus_q_half_{q}"), + "y90": lambda q: (f"minus_q_half_{q}", f"plus_i_half_{q}"), + "-x90": lambda q: (f"minus_i_half_{q}", f"minus_q_half_{q}"), + "-y90": lambda q: (f"plus_q_half_{q}", f"minus_i_half_{q}"), +} + + +def native_operations(qubit): + return {op: f"{op}_{qubit}" for op in NATIVE_OPS.keys()} + + +def drive_waveform_components(qubit, mode, samples): + return { + f"plus_{mode}_{qubit}": { + "type": "arbitrary", + "samples": samples, + }, + f"minus_{mode}_{qubit}": { + "type": "arbitrary", + "samples": -samples, + }, + f"plus_{mode}_half_{qubit}": { + "type": "arbitrary", + "samples": samples / 2, + }, + f"minus_{mode}_half_{qubit}": { + "type": "arbitrary", + "samples": -samples / 2, + }, + } + + +def drive_waveforms(platform, qubit): + pulse = platform.qubits[qubit].native_gates.RX.pulse(start=0) + envelope_i, envelope_q = pulse.envelope_waveforms(sampling_rate=1) + return drive_waveform_components( + qubit, "i", envelope_i.data + ) | drive_waveform_components(qubit, "q", envelope_q.data) + + +def waveforms(platform, qubits): + _waveforms = { + "zero": { + "type": "constant", + "sample": 0.0, + }, + } + _waveforms.update( + { + f"mz_{q}": { + "type": "constant", + "sample": platform.qubits[q].native_gates.MZ.amplitude, + } + for q in qubits + } + ) + for q in qubits: + 
_waveforms.update(drive_waveforms(platform, q)) + return _waveforms + + +def drive_pulses(platform, qubit): + _pulses = {} + for op, wf in NATIVE_OPS.items(): + i, q = wf(qubit) + _pulses[f"{op}_{qubit}"] = { + "operation": "control", + "length": platform.qubits[qubit].native_gates.RX.duration, + "waveforms": { + "I": i, + "Q": q, + }, + "digital_marker": "ON", + } + return _pulses + + +def pulses(platform, qubits): + _pulses = { + f"mz_{q}": { + "operation": "measurement", + "length": platform.qubits[q].native_gates.MZ.duration, + "waveforms": { + "I": f"mz_{q}", + "Q": "zero", + }, + "integration_weights": { + "cos": f"cosine_weights{q}", + "sin": f"sine_weights{q}", + "minus_sin": f"minus_sine_weights{q}", + }, + "digital_marker": "ON", + } + for q in qubits + } + for q in qubits: + _pulses.update(drive_pulses(platform, q)) + return _pulses + + +def integration_weights(platform, qubits): + _integration_weights = {} + for q in qubits: + _duration = platform.qubits[q].native_gates.MZ.duration + _integration_weights.update( + { + f"cosine_weights{q}": { + "cosine": [(1.0, _duration)], + "sine": [(-0.0, _duration)], + }, + f"sine_weights{q}": { + "cosine": [(0.0, _duration)], + "sine": [(1.0, _duration)], + }, + f"minus_sine_weights{q}": { + "cosine": [(-0.0, _duration)], + "sine": [(-1.0, _duration)], + }, + } + ) + return _integration_weights + + +def register_element(config, qubit, time_of_flight, smearing): + config.register_port(qubit.readout.port) + config.register_port(qubit.feedback.port) + mz_frequency = qubit.native_gates.MZ.frequency - qubit.readout.lo_frequency + config.register_readout_element(qubit, mz_frequency, time_of_flight, smearing) + config.register_port(qubit.drive.port) + rx_frequency = qubit.native_gates.RX.frequency - qubit.drive.lo_frequency + config.register_drive_element(qubit, rx_frequency) + if qubit.flux is not None: + config.register_port(qubit.flux.port) + config.register_flux_element(qubit) + + +def generate_config(platform, qubits): + con = [ + instr + for instr in platform.instruments.values() + if isinstance(instr, QMController) + ][0] + config = QMConfig() + for q in qubits: + qubit = platform.qubits[q] + register_element(config, qubit, con.time_of_flight, con.smearing) + config.elements[f"readout{q}"]["operations"]["measure"] = f"mz_{q}" + config.elements[f"drive{q}"]["operations"] = native_operations(q) + config.pulses = pulses(platform, qubits) + config.waveforms = waveforms(platform, qubits) + config.integration_weights = integration_weights(platform, qubits) + return asdict(config) diff --git a/src/qibocal/protocols/qua/qua_clifford_group.npz b/src/qibocal/protocols/qua/qua_clifford_group.npz new file mode 100644 index 000000000..4cc9f4d1c Binary files /dev/null and b/src/qibocal/protocols/qua/qua_clifford_group.npz differ diff --git a/src/qibocal/protocols/qua/rb_ondevice.py b/src/qibocal/protocols/qua/rb_ondevice.py new file mode 100644 index 000000000..8fe571bb9 --- /dev/null +++ b/src/qibocal/protocols/qua/rb_ondevice.py @@ -0,0 +1,565 @@ +import time +from dataclasses import dataclass, field +from typing import Optional + +import matplotlib.pyplot as plt +import mpld3 +import numpy as np +import numpy.typing as npt +from qibolab.platform import Platform +from qibolab.qubits import QubitId +from qm import generate_qua_script +from qm.qua import * # nopycln: import +from qualang_tools.bakery.randomized_benchmark_c1 import c1_table +from qualang_tools.results import fetching_tool +from scipy.optimize import curve_fit + +from 
qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.protocols.utils import table_dict, table_html + +from .configuration import generate_config +from .utils import RBType, filter_function, filter_term, generate_depths, power_law + +# parser.add_argument("--simulation-duration", type=int, default=None) +# parser.add_argument("--relaxation-time", type=int, default=None) + + +@dataclass +class RbOnDeviceParameters(Parameters): + num_of_sequences: int + max_circuit_depth: int + "Maximum circuit depth" + delta_clifford: int + "Play each sequence with a depth step equals to delta_clifford" + seed: Optional[int] = None + "Pseudo-random number generator seed" + n_avg: int = 1 + "Number of averaging loops for each random sequence" + save_sequences: bool = True + apply_inverse: bool = False + state_discrimination: bool = True + "Flag to enable state discrimination if the readout has been calibrated (rotated blobs and threshold)" + debug: bool = False + "If enabled it dumps the qua script." + + def __post_init__(self): + if self.seed is None: + self.seed = np.random.randint(0, int(1e6)) + + +def generate_sequence(max_circuit_depth, seed): + sequence = declare(int, size=max_circuit_depth + 1) + i = declare(int) + rand = Random(seed=seed) + + with for_(i, 0, i < max_circuit_depth, i + 1): + assign(sequence[i], rand.rand_int(24)) + + return sequence + + +def generate_sequence_with_inverse(max_circuit_depth, seed, c1_table, inv_gates): + cayley = declare(int, value=c1_table.flatten().tolist()) + inv_list = declare(int, value=inv_gates) + current_state = declare(int) + step = declare(int) + sequence = declare(int, size=max_circuit_depth + 1) + inv_gate = declare(int, size=max_circuit_depth + 1) + i = declare(int) + rand = Random(seed=seed) + + assign(current_state, 0) + with for_(i, 0, i < max_circuit_depth, i + 1): + assign(step, rand.rand_int(24)) + assign(current_state, cayley[current_state * 24 + step]) + assign(sequence[i], step) + assign(inv_gate[i], inv_list[current_state]) + + return sequence, inv_gate + + +def play_sequence(qubit, sequence_list, apply_inverse, depth, rx_duration): + i = declare(int) + condition = (i <= depth) if apply_inverse else (i < depth) + with for_(i, 0, condition, i + 1): + with switch_(sequence_list[i], unsafe=True): + with case_(0): + wait(rx_duration // 4, qubit) + with case_(1): + play("x180", qubit) + with case_(2): + play("y180", qubit) + with case_(3): + play("y180", qubit) + play("x180", qubit) + with case_(4): + play("x90", qubit) + play("y90", qubit) + with case_(5): + play("x90", qubit) + play("-y90", qubit) + with case_(6): + play("-x90", qubit) + play("y90", qubit) + with case_(7): + play("-x90", qubit) + play("-y90", qubit) + with case_(8): + play("y90", qubit) + play("x90", qubit) + with case_(9): + play("y90", qubit) + play("-x90", qubit) + with case_(10): + play("-y90", qubit) + play("x90", qubit) + with case_(11): + play("-y90", qubit) + play("-x90", qubit) + with case_(12): + play("x90", qubit) + with case_(13): + play("-x90", qubit) + with case_(14): + play("y90", qubit) + with case_(15): + play("-y90", qubit) + with case_(16): + play("-x90", qubit) + play("y90", qubit) + play("x90", qubit) + with case_(17): + play("-x90", qubit) + play("-y90", qubit) + play("x90", qubit) + with case_(18): + play("x180", qubit) + play("y90", qubit) + with case_(19): + play("x180", qubit) + play("-y90", qubit) + with case_(20): + play("y180", qubit) + play("x90", qubit) + with case_(21): + play("y180", qubit) + play("-x90", qubit) + with 
case_(22):
+                play("x90", qubit)
+                play("y90", qubit)
+                play("x90", qubit)
+            with case_(23):
+                play("-x90", qubit)
+                play("y90", qubit)
+                play("-x90", qubit)
+
+
+RbOnDeviceType = np.dtype(
+    [
+        ("state", np.int32),
+        ("sequences", np.int32),
+    ]
+)
+
+
+@dataclass
+class RbOnDeviceData(Data):
+    rb_type: str
+    relaxation_time: int
+    depths: list[int]
+    data: dict[QubitId, dict[str, npt.NDArray[np.int32]]]
+
+    def _get_data(self, key: str):
+        qubit = self.qubits[0]
+        try:
+            arrays = self.data[qubit].item(0)
+            return arrays[key]
+        except AttributeError:
+            return self.data[qubit][key]
+
+    @property
+    def state(self):
+        return self._get_data("state")
+
+    @property
+    def sequences(self):
+        return self._get_data("sequences")
+
+
+def _acquisition(
+    params: RbOnDeviceParameters, platform: Platform, targets: list[QubitId]
+) -> RbOnDeviceData:
+    assert len(targets) == 1
+    target = targets[0]
+    # Element names as registered in the QM config (see configuration.py)
+    qubit = f"drive{target}"
+    resonator = f"readout{target}"
+    save_sequences = params.save_sequences
+    apply_inverse = params.apply_inverse
+    relaxation_time = params.relaxation_time
+
+    num_of_sequences = params.num_of_sequences
+    n_avg = params.n_avg
+    max_circuit_depth = params.max_circuit_depth
+    delta_clifford = params.delta_clifford
+    assert (
+        max_circuit_depth / delta_clifford
+    ).is_integer(), "max_circuit_depth / delta_clifford must be an integer."
+    seed = params.seed
+    state_discrimination = params.state_discrimination
+    # List of recovery gates from the lookup table
+    inv_gates = [int(np.where(c1_table[i, :] == 0)[0][0]) for i in range(24)]
+
+    ###################
+    # The QUA program #
+    ###################
+    with program() as rb:
+        depth = declare(int)  # QUA variable for the varying depth
+        depth_target = declare(
+            int
+        )  # QUA variable for the current depth (changes in steps of delta_clifford)
+        # QUA variable to store the last Clifford gate of the current sequence which is replaced by the recovery gate
+        saved_gate = declare(int)
+        m = declare(int)  # QUA variable for the loop over random sequences
+        n = declare(int)  # QUA variable for the averaging loop
+        I = declare(fixed)  # QUA variable for the 'I' quadrature
+        Q = declare(fixed)  # QUA variable for the 'Q' quadrature
+        state = declare(bool)  # QUA variable for state discrimination
+        # The relevant streams
+        m_st = declare_stream()
+        if state_discrimination:
+            state_st = declare_stream()
+        else:
+            I_st = declare_stream()
+            Q_st = declare_stream()
+        # save random sequences to return to host
+        if save_sequences:
+            sequence_st = declare_stream()
+
+        with for_(
+            m, 0, m < num_of_sequences, m + 1
+        ):  # QUA for_ loop over the random sequences
+            if apply_inverse:
+                sequence_list, inv_gate_list = generate_sequence_with_inverse(
+                    max_circuit_depth, seed, c1_table, inv_gates
+                )  # Generate the random sequence of length max_circuit_depth
+            else:
+                sequence_list = generate_sequence(max_circuit_depth, seed)
+
+            if delta_clifford == 1:
+                assign(depth_target, 1)
+            else:
+                assign(depth_target, 0)  # Initialize the current depth to 0
+
+            if save_sequences:
+                save(sequence_list[0], sequence_st)  # save sequence indices in stream
+            with for_(
+                depth, 1, depth <= max_circuit_depth, depth + 1
+            ):  # Loop over the depths
+                if save_sequences:
+                    save(sequence_list[depth], sequence_st)
+                # Replacing the last gate in the sequence with the sequence's inverse gate
+                # The original gate is saved in 'saved_gate' and is being restored at the end
+                assign(saved_gate, sequence_list[depth])
+                if apply_inverse:
assign(sequence_list[depth], inv_gate_list[depth - 1]) + # Only played the depth corresponding to target_depth + with if_((depth == 1) | (depth == depth_target)): + with for_(n, 0, n < n_avg, n + 1): # Averaging loop + # Can be replaced by active reset + if relaxation_time > 0: + wait(relaxation_time // 4, resonator) + # Align the two elements to play the sequence after qubit initialization + align(resonator, qubit) + # The strict_timing ensures that the sequence will be played without gaps + with strict_timing_(): + # Play the random sequence of desired depth + rx_duration = platform.qubits[ + target + ].native_gates.RX.duration + play_sequence( + qubit, sequence_list, apply_inverse, depth, rx_duration + ) + # Align the two elements to measure after playing the circuit. + align(qubit, resonator) + # wait(3 * (depth + 1) * (DRIVE_DURATION // 4), resonator) + # Make sure you updated the ge_threshold and angle if you want to use state discrimination + # state, I, Q = readout_macro(threshold=ge_threshold, state=state, I=I, Q=Q) + measure( + "measure", + resonator, + None, + dual_demod.full("cos", "out1", "sin", "out2", I), + dual_demod.full("minus_sin", "out1", "cos", "out2", Q), + ) + # Save the results to their respective streams + if state_discrimination: + threshold = platform.qubits[target].threshold + iq_angle = platform.qubits[target].iq_angle + cos = np.cos(iq_angle) + sin = np.sin(iq_angle) + assign(state, I * cos - Q * sin > threshold) + save(state, state_st) + else: + save(I, I_st) + save(Q, Q_st) + # Go to the next depth + assign(depth_target, depth_target + delta_clifford) + # Reset the last gate of the sequence back to the original Clifford gate + # (that was replaced by the recovery gate at the beginning) + assign(sequence_list[depth], saved_gate) + # Save the counter for the progress bar + save(m, m_st) + + with stream_processing(): + ndepth = max_circuit_depth / delta_clifford + int(delta_clifford > 1) + m_st.save("iteration") + if save_sequences: + sequence_st.buffer(max_circuit_depth + 1).buffer(num_of_sequences).save( + "sequences" + ) + + if state_discrimination: + if n_avg > 1: + # saves a 2D array of depth and random pulse sequences in order to get error bars along the random sequences + state_st.boolean_to_int().buffer(n_avg).map( + FUNCTIONS.average() + ).buffer(ndepth).buffer(num_of_sequences).save("state") + # returns a 1D array of averaged random pulse sequences vs depth of circuit for live plotting + state_st.boolean_to_int().buffer(n_avg).map( + FUNCTIONS.average() + ).buffer(ndepth).average().save("state_avg") + else: + # saves a 2D array of depth and random pulse sequences in order to get error bars along the random sequences + state_st.boolean_to_int().buffer(ndepth).buffer( + num_of_sequences + ).save("state") + # returns a 1D array of averaged random pulse sequences vs depth of circuit for live plotting + state_st.boolean_to_int().buffer(ndepth).average().save("state_avg") + else: + I_st.buffer(n_avg).map(FUNCTIONS.average()).buffer(ndepth).buffer( + num_of_sequences + ).save("I") + Q_st.buffer(n_avg).map(FUNCTIONS.average()).buffer(ndepth).buffer( + num_of_sequences + ).save("Q") + I_st.buffer(n_avg).map(FUNCTIONS.average()).buffer( + ndepth + ).average().save("I_avg") + Q_st.buffer(n_avg).map(FUNCTIONS.average()).buffer( + ndepth + ).average().save("Q_avg") + + # Print total relaxation time (estimate of execution time) + total_relaxation = ( + relaxation_time + * num_of_sequences + * n_avg + * (max_circuit_depth // delta_clifford) + * 1e-9 + ) + 
print("\nTotal relaxation time: %.2f sec\n" % total_relaxation) + + ##################################### + # Open Communication with the QOP # + ##################################### + controller = platform._controller + qmm = controller.manager + + ########################### + # Run or Simulate Program # + ########################### + # Open the quantum machine + config = generate_config(platform, list(platform.qubits.keys())) + qm = qmm.open_qm(config) + # Send the QUA program to the OPX, which compiles and executes it + job = qm.execute(rb) + + if params.debug: + with open("qua_script.py", "w") as file: + file.write(generate_qua_script(rb, config)) + + # Get results from QUA program + if state_discrimination: + results = fetching_tool(job, data_list=["state_avg", "iteration"], mode="live") + else: + results = fetching_tool( + job, data_list=["I_avg", "Q_avg", "iteration"], mode="live" + ) + + start_time = time.time() + while results.is_processing(): + # data analysis + if state_discrimination: + state_avg, iteration = results.fetch_all() + value_avg = state_avg + else: + I, Q, iteration = results.fetch_all() + value_avg = I + final_time = time.time() + + # At the end of the program, fetch the non-averaged results to get the error-bars + rb_type = RBType.infer(apply_inverse, relaxation_time).value + if state_discrimination: + if save_sequences: + results = fetching_tool(job, data_list=["state", "sequences"]) + state, sequences = results.fetch_all() + data = {target: {"state": state, "sequences": sequences}} + else: + results = fetching_tool(job, data_list=["state"]) + state = results.fetch_all()[0] + data = {target: {"state": state}} + else: + raise NotImplementedError + # if save_sequences: + # results = fetching_tool(job, data_list=["I", "Q", "sequences"]) + # I, Q, sequences = results.fetch_all() + # data.voltage_i[target] = I + # data.voltage_q[target] = Q + # data.sequences[target] = sequences + # else: + # results = fetching_tool(job, data_list=["I", "Q"]) + # I, Q = results.fetch_all() + # data.voltage_i[target] = I + # data.voltage_q[target] = Q + return RbOnDeviceData( + rb_type=rb_type, + relaxation_time=relaxation_time, + depths=[int(x) for x in generate_depths(max_circuit_depth, delta_clifford)], + data=data, + ) + + +@dataclass +class RbOnDeviceResults(Results): + ydata: dict[QubitId, list[float]] = field(default_factory=dict) + ysigma: dict[QubitId, list[float]] = field(default_factory=dict) + pars: dict[QubitId, list[float]] = field(default_factory=dict) + cov: dict[QubitId, list[float]] = field(default_factory=dict) + + +def process_data(data: RbOnDeviceData): + rb_type = RBType(data.rb_type) + depths = data.depths + state = data.state + + if rb_type is RBType.STANDARD: + return 1 - np.mean(state, axis=0), np.std(state, axis=0) / np.sqrt( + state.shape[0] + ) + + is_restless = rb_type is RBType.RESTLESS + term = filter_term(depths, state, data.sequences, is_restless=is_restless) + ff = filter_function(term) + return np.mean(ff, axis=1), np.std(ff, axis=1) / np.sqrt(ff.shape[1]) + + +def _fit(data: RbOnDeviceData) -> RbOnDeviceResults: + qubit = data.qubits[0] + + ydata, ysigma = process_data(data) + results = RbOnDeviceResults( + ydata={qubit: list(ydata)}, ysigma={qubit: list(ysigma)} + ) + try: + pars, cov = curve_fit( + f=power_law, + xdata=data.depths, + ydata=ydata, + sigma=ysigma, + p0=[0.5, 0.0, 0.9], + bounds=(-np.inf, np.inf), + maxfev=2000, + ) + results.pars[qubit] = list(pars) + results.cov[qubit] = list(cov.flatten()) + except RuntimeError: + pass + return 
results + + +def _plot(data: RbOnDeviceData, target: QubitId, fit: RbOnDeviceResults): + depths = data.depths + state = data.state + + if fit is not None: + ydata = fit.ydata[target] + ysigma = fit.ysigma[target] + else: + ydata, ysigma = process_data(data) + + fitting_report = table_html( + table_dict( + target, + [ + "RB type", + "Number of sequences", + "Relaxation time (us)", + ], + [ + (data.rb_type.capitalize(),), + (len(state),), + (data.relaxation_time // 1000,), + ], + ) + ) + + pars = None + if fit is not None: + pars = fit.pars.get(target) + if pars is not None: + stdevs = np.sqrt(np.diag(np.reshape(fit.cov[target], (3, 3)))) + one_minus_p = 1 - pars[2] + r_c = one_minus_p * (1 - 1 / 2**1) + r_g = r_c / 1.875 # 1.875 is the average number of gates in clifford operation + r_c_std = stdevs[2] * (1 - 1 / 2**1) + r_g_std = r_c_std / 1.875 + fitting_report = "\n".join( + [ + fitting_report, + table_html( + table_dict( + target, + [ + "A", + "B", + "p", + "Error rate (1-p)", + "Clifford set infidelity", + "Gate infidelity", + ], + [ + (np.round(pars[0], 3), np.round(stdevs[0], 3)), + (np.round(pars[1], 3), np.round(stdevs[1], 3)), + (np.round(pars[2], 3), np.round(stdevs[2], 3)), + (one_minus_p, stdevs[2]), + (r_c, r_c_std), + (r_g, r_g_std), + ], + display_error=True, + ) + ), + ] + ) + + fig = plt.figure(figsize=(16, 6)) + title = f"{data.rb_type.capitalize()} RB" + plt.errorbar( + depths, ydata, ysigma, marker="o", linestyle="-", markersize=4, label="data" + ) + if pars is not None: + max_circuit_depth = depths[-1] + x = np.linspace(0, max_circuit_depth + 0.1, 1000) + plt.plot(x, power_law(x, *pars), linestyle="--", label="fit") + plt.xlabel("Depth") + plt.ylabel("Survival probability") + plt.legend() + + figures = [mpld3.fig_to_html(fig)] + return figures, fitting_report + + +def _update(results: RbOnDeviceResults, platform: Platform, target: QubitId): + pass + + +rb_ondevice = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/qua/utils.py b/src/qibocal/protocols/qua/utils.py new file mode 100644 index 000000000..76ae16782 --- /dev/null +++ b/src/qibocal/protocols/qua/utils.py @@ -0,0 +1,98 @@ +from enum import Enum +from pathlib import Path + +import numpy as np + +CLIFFORD = np.load(Path(__file__).parent / "qua_clifford_group.npz") + + +RESTLESS_RELAXATION_CUTOFF = int(20e3) + + +class RBType(Enum): + STANDARD = "standard" + FILTERED = "filtered" + RESTLESS = "restless" + + @classmethod + def infer(cls, apply_inverse, relaxation_time): + if apply_inverse: + return cls.STANDARD + if relaxation_time < RESTLESS_RELAXATION_CUTOFF: + assert not apply_inverse + return cls.RESTLESS + return cls.FILTERED + + +def power_law(power, a, b, p): + """Function to fit to survival probability vs circuit depths.""" + return a * (p**power) + b + + +def clifford_mul(sequences, interleave=None): + """Quick multiplication over single-qubit Clifford group (using integers). + + Requires the `CLIFFORD` file (loaded on top) which contains + the group multiplication matrix and the mapping from integers to + unitary matrices. + The indexing is the one defined by the QUA script used for data acquisition. 
+ """ + group = CLIFFORD["algebra"] + result = np.zeros_like(sequences[:, 0]) + for i in range(sequences.shape[1]): + if interleave is not None: + matrix = group[sequences[:, i], interleave] + else: + matrix = sequences[:, i] + result = group[matrix, result] + return result + + +def generate_depths(max_circuit_depth, delta_clifford): + """Generate vector of depths compatible with the QUA acquisition script.""" + if delta_clifford == 1: + return np.arange(1, max_circuit_depth + 0.1, delta_clifford).astype(int) + depths = np.arange(0, max_circuit_depth + 0.1, delta_clifford).astype(int) + depths[0] = 1 + return depths + + +def filter_term(depths, state, sequences, is_restless=False, interleave=None): + """Calculate Clifford sandwich term that appears in the filter function. + + Args: + depths (np.ndarray): Vector of depths. size: (ndepths,) + state (np.ndarray): Matrix with acquired shots. size (num_of_sequences, ndepths) + sequences (np.ndarray): Matrix with Clifford indices used. size (num_of_sequences, max_circuit_depth) + interleave (int): Optional integer from 0 to 23, corresponding to the Clifford matrix to interleave. + is_restless (bool): If `True` the restless filter function is used. + """ + state = state.astype(int) + seqids = np.arange(len(sequences)) + terms = [] + for i, depth in enumerate(depths): + clifford_indices = clifford_mul(sequences[:, :depth], interleave=interleave) + # `clifford_indices`: (num_of_sequences,) + clifford_matrices = CLIFFORD["matrices"][clifford_indices] + # `clifford_matrices`: (num_of_sequences, 2, 2) + if is_restless: + if i > 0: + state_before = state[:, i - 1] + else: + state_before = np.concatenate(([0], state[:-1, i - 1])) + else: + state_before = 0 + terms.append(clifford_matrices[seqids, state[:, i], state_before]) + return terms + + +def filter_function(x): + """Calculate filter function using Eq. (7) from notes. + + Args: + x: Term calculated by the ``filter_term`` function above. + + Returns: + Filter function output for each data point of shape (ndepths, num_of_sequences). + """ + return 3 * (np.abs(x) ** 2 - 0.5) diff --git a/src/qibocal/protocols/ramsey/ramsey_signal.py b/src/qibocal/protocols/ramsey/ramsey_signal.py index b6f3aa6d3..6f6bcfdf1 100644 --- a/src/qibocal/protocols/ramsey/ramsey_signal.py +++ b/src/qibocal/protocols/ramsey/ramsey_signal.py @@ -295,7 +295,10 @@ def _plot(data: RamseySignalData, target: QubitId, fit: RamseySignalResults = No def _update(results: RamseySignalResults, platform: Platform, target: QubitId): - update.drive_frequency(results.frequency[target][0], platform, target) + if int(results.delta_phys[target][0]) == int(results.delta_fitting[target][0]): + update.t2(results.t2[target][0], platform, target) + else: + update.drive_frequency(results.frequency[target][0], platform, target) ramsey_signal = Routine(_acquisition, _fit, _plot, _update)