diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fde8d1fad..8f9b57dee 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,6 +20,6 @@ repos:
- id: isort
args: ["--profile", "black"]
- repo: https://github.com/asottile/pyupgrade
- rev: v3.3.2
+ rev: v3.4.0
hooks:
- id: pyupgrade
diff --git a/extras/classification/play.py b/extras/classification/play.py
index 2069b32e1..f46ae5656 100644
--- a/extras/classification/play.py
+++ b/extras/classification/play.py
@@ -5,15 +5,15 @@
logging.basicConfig(level=logging.INFO)
data_path = Path("calibrate_qubit_states/data.csv")
-base_dir = Path("_results3")
+base_dir = Path("results")
base_dir.mkdir(exist_ok=True)
for qubit in range(1, 5):
print(f"QUBIT: {qubit}")
qubit_dir = base_dir / f"qubit{qubit}"
table, y_test, x_test = run.train_qubit(
- data_path, base_dir, qubit, classifiers=["qubit_fit"]
+ data_path, base_dir, qubit, classifiers=["qblox_fit"]
)
run.dump_benchmarks_table(table, qubit_dir)
+
plots.plot_table(table, qubit_dir)
- plots.plot_conf_matr(y_test, qubit_dir)
diff --git a/poetry.lock b/poetry.lock
index 02c6ec025..7179a869e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.4.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
[[package]]
name = "absl-py"
@@ -171,6 +171,18 @@ soupsieve = ">1.2"
html5lib = ["html5lib"]
lxml = ["lxml"]
+[[package]]
+name = "blinker"
+version = "1.6.2"
+description = "Fast, simple object-to-object and broadcast signaling"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "blinker-1.6.2-py3-none-any.whl", hash = "sha256:c3d739772abb7bc2860abf5f2ec284223d9ad5c76da018234f6f50d6f31ab1f0"},
+ {file = "blinker-1.6.2.tar.gz", hash = "sha256:4afd3de66ef3a9f8067559fb7a1cbe555c17dcbe15971b05d1b625c3e7abe213"},
+]
+
[[package]]
name = "cachetools"
version = "5.3.0"
@@ -649,22 +661,23 @@ pyrepl = ">=0.8.2"
[[package]]
name = "flask"
-version = "2.2.3"
+version = "2.3.2"
description = "A simple framework for building complex web applications."
category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "Flask-2.2.3-py3-none-any.whl", hash = "sha256:c0bec9477df1cb867e5a67c9e1ab758de9cb4a3e52dd70681f59fa40a62b3f2d"},
- {file = "Flask-2.2.3.tar.gz", hash = "sha256:7eb373984bf1c770023fce9db164ed0c3353cd0b53f130f4693da0ca756a2e6d"},
+ {file = "Flask-2.3.2-py3-none-any.whl", hash = "sha256:77fd4e1249d8c9923de34907236b747ced06e5467ecac1a7bb7115ae0e9670b0"},
+ {file = "Flask-2.3.2.tar.gz", hash = "sha256:8c2f9abd47a9e8df7f0c3f091ce9497d011dc3b31effcf4c85a6e2b50f4114ef"},
]
[package.dependencies]
-click = ">=8.0"
+blinker = ">=1.6.2"
+click = ">=8.1.3"
importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""}
-itsdangerous = ">=2.0"
-Jinja2 = ">=3.0"
-Werkzeug = ">=2.2.2"
+itsdangerous = ">=2.1.2"
+Jinja2 = ">=3.1.2"
+Werkzeug = ">=2.3.3"
[package.extras]
async = ["asgiref (>=3.2)"]
@@ -3202,21 +3215,21 @@ files = [
[[package]]
name = "werkzeug"
-version = "2.2.3"
+version = "2.3.3"
description = "The comprehensive WSGI web application library."
category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"},
- {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"},
+ {file = "Werkzeug-2.3.3-py3-none-any.whl", hash = "sha256:4866679a0722de00796a74086238bb3b98d90f423f05de039abb09315487254a"},
+ {file = "Werkzeug-2.3.3.tar.gz", hash = "sha256:a987caf1092edc7523edb139edb20c70571c4a8d5eed02e0b547b4739174d091"},
]
[package.dependencies]
MarkupSafe = ">=2.1.1"
[package.extras]
-watchdog = ["watchdog"]
+watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "wheel"
@@ -3322,8 +3335,8 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker
testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[extras]
-classify = ["keras-tuner", "matplotlib", "scikit-learn", "seaborn", "tensorflow"]
-docs = ["Sphinx", "furo", "recommonmark", "sphinx_markdown_tables", "sphinxcontrib-bibtex"]
+classify = ["scikit-learn", "tensorflow", "keras-tuner", "matplotlib", "seaborn"]
+docs = ["Sphinx", "furo", "sphinxcontrib-bibtex", "recommonmark", "sphinx_markdown_tables"]
viz = ["pydot"]
[metadata]
diff --git a/src/qibocal/calibrations/__init__.py b/src/qibocal/calibrations/__init__.py
index e63441ef8..b4257825a 100644
--- a/src/qibocal/calibrations/__init__.py
+++ b/src/qibocal/calibrations/__init__.py
@@ -8,5 +8,6 @@
from qibocal.calibrations.characterization.ramsey import *
from qibocal.calibrations.characterization.resonator_spectroscopy import *
from qibocal.calibrations.characterization.resonator_spectroscopy_sample import *
+from qibocal.calibrations.characterization.ro_optimization import *
from qibocal.calibrations.characterization.spin_echo import *
from qibocal.calibrations.characterization.t1 import *
diff --git a/src/qibocal/calibrations/characterization/ro_optimization.py b/src/qibocal/calibrations/characterization/ro_optimization.py
new file mode 100644
index 000000000..5eb0061fd
--- /dev/null
+++ b/src/qibocal/calibrations/characterization/ro_optimization.py
@@ -0,0 +1,436 @@
+import numpy as np
+from qibolab.platforms.abstract import AbstractPlatform
+from qibolab.pulses import PulseSequence
+from qibolab.sweeper import Parameter, Sweeper
+
+from qibocal import plots
+from qibocal.data import DataUnits
+from qibocal.decorators import plot
+from qibocal.fitting.methods import ro_optimization_fit
+
+
+@plot("Qubit States", plots.ro_frequency)
+def ro_frequency(
+ platform: AbstractPlatform,
+ qubits: dict,
+ frequency_width: float,
+ frequency_step: float,
+ nshots: int,
+):
+ """
+ Method which optimizes the Read-out fidelity by varying the Read-out pulse frequency.
+    Two analogous tests are performed for calibrating the ground state and the excited state of the oscillator.
+ The subscripts `exc` and `gnd` will represent the excited state |1> and the ground state |0>.
+ Their distinctiveness is then associated to the fidelity.
+
+ Args:
+ platform (:class:`qibolab.platforms.abstract.AbstractPlatform`): custom abstract platform on which we perform the calibration.
+ qubits (dict): List of target qubits to perform the action
+ nshots (int): number of times the pulse sequence will be repeated.
+ frequency_width (float): width of the frequency range to be swept in Hz.
+ frequency_step (float): step of the frequency range to be swept in Hz.
+ Returns:
+ A DataUnits object with the raw data obtained for the fast and precision sweeps with the following keys
+
+    - **MSR[V]**: Resonator signal voltage measurement in volts
+    - **i[V]**: Resonator signal voltage measurement for the component I in volts
+    - **q[V]**: Resonator signal voltage measurement for the component Q in volts
+    - **phase[rad]**: Resonator signal phase measurement in radians
+ - **iteration**: Execution number
+ - **qubit**: The qubit being tested
+ - **state**: The state of the qubit being tested
+ - **frequency[Hz]**: The frequency of the readout being tested
+ - **delta_frequency[Hz]**: The frequency offset from the runcard value
+
+ """
+
+ # reload instrument settings from runcard
+ platform.reload_settings()
+
+ # create two sequences of pulses:
+ # state0_sequence: I - MZ
+ # state1_sequence: RX - MZ
+
+ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel
+ state0_sequence = PulseSequence()
+ state1_sequence = PulseSequence()
+
+ RX_pulses = {}
+ ro_pulses = {}
+ for qubit in qubits:
+ RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0)
+ ro_pulses[qubit] = platform.create_qubit_readout_pulse(
+ qubit, start=RX_pulses[qubit].finish
+ )
+
+ state0_sequence.add(ro_pulses[qubit])
+ state1_sequence.add(RX_pulses[qubit])
+ state1_sequence.add(ro_pulses[qubit])
+ sequences = {0: state0_sequence, 1: state1_sequence}
+ # create a DataUnits object to store the results
+ data = DataUnits(
+ name="data",
+ quantities={"frequency": "Hz", "delta_frequency": "Hz"},
+ options=["qubit", "iteration", "state"],
+ )
+
+ # iterate over the frequency range
+ delta_frequency_range = np.arange(
+ -frequency_width / 2, frequency_width / 2, frequency_step
+ )
+
+ frequency_sweeper = Sweeper(
+ Parameter.frequency,
+ delta_frequency_range,
+ pulses=[ro_pulses[qubit] for qubit in qubits],
+ )
+
+ # Execute sequences for both states
+ for state in [0, 1]:
+ results = platform.sweep(
+ sequences[state], frequency_sweeper, nshots=nshots, average=False
+ )
+
+    # retrieve and store the results for every qubit
+ for qubit in qubits:
+ r = {k: v.ravel() for k, v in results[ro_pulses[qubit].serial].raw.items()}
+ r.update(
+ {
+ "frequency[Hz]": np.repeat(
+ np.vstack(delta_frequency_range).T,
+ nshots,
+ axis=0,
+ ).flatten()
+ + ro_pulses[qubit].frequency,
+ "delta_frequency[Hz]": np.repeat(
+ np.vstack(delta_frequency_range).T,
+ nshots,
+ axis=0,
+ ).flatten(),
+ "qubit": [qubit] * nshots * len(delta_frequency_range),
+ "iteration": np.repeat(
+ np.vstack(np.arange(nshots)).T,
+ len(delta_frequency_range),
+ axis=1,
+ ).flatten(),
+ "state": [state] * nshots * len(delta_frequency_range),
+ }
+ )
+ data.add_data_from_dict(r)
+
+ # finally, save the remaining data and the fits
+ yield data
+ yield ro_optimization_fit(data, "state", "qubit", "iteration", "delta_frequency")
+
+
+@plot("Qubit States", plots.ro_amplitude)
+def ro_amplitude(
+ platform: AbstractPlatform,
+ qubits: dict,
+ amplitude_factor_min: float,
+ amplitude_factor_max: float,
+ amplitude_factor_step: float,
+ nshots: int,
+):
+ """
+ Method which optimizes the Read-out fidelity by varying the Read-out pulse amplitude.
+    Two analogous tests are performed for calibrating the ground state and the excited state of the oscillator.
+ The subscripts `exc` and `gnd` will represent the excited state |1> and the ground state |0>.
+ Their distinctiveness is then associated to the fidelity.
+
+ Args:
+ platform (:class:`qibolab.platforms.abstract.AbstractPlatform`): custom abstract platform on which we perform the calibration.
+ qubits (dict): List of target qubits to perform the action
+ nshots (int): number of times the pulse sequence will be repeated.
+ amplitude_factor_min (float): minimum amplitude factor to be swept.
+ amplitude_factor_max (float): maximum amplitude factor to be swept.
+ amplitude_factor_step (float): step of the amplitude factor to be swept.
+ Returns:
+ A DataUnits object with the raw data obtained for the fast and precision sweeps with the following keys
+
+    - **MSR[V]**: Resonator signal voltage measurement in volts
+    - **i[V]**: Resonator signal voltage measurement for the component I in volts
+    - **q[V]**: Resonator signal voltage measurement for the component Q in volts
+    - **phase[rad]**: Resonator signal phase measurement in radians
+ - **qubit**: The qubit being tested
+ - **iteration**: Execution number
+ - **state**: The state of the qubit being tested
+ - **amplitude_factor**: The amplitude factor of the readout being tested
+ - **delta_amplitude_factor**: The amplitude offset from the runcard value
+
+ """
+
+ # reload instrument settings from runcard
+ platform.reload_settings()
+
+ # create two sequences of pulses:
+ # state0_sequence: I - MZ
+ # state1_sequence: RX - MZ
+
+ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel
+ state0_sequence = PulseSequence()
+ state1_sequence = PulseSequence()
+
+ RX_pulses = {}
+ ro_pulses = {}
+ for qubit in qubits:
+ RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0)
+ ro_pulses[qubit] = platform.create_qubit_readout_pulse(
+ qubit, start=RX_pulses[qubit].finish
+ )
+
+ state0_sequence.add(ro_pulses[qubit])
+ state1_sequence.add(RX_pulses[qubit])
+ state1_sequence.add(ro_pulses[qubit])
+ sequences = {0: state0_sequence, 1: state1_sequence}
+ # create a DataUnits object to store the results
+ data = DataUnits(
+ name="data",
+ quantities={"amplitude": "dimensionless", "delta_amplitude": "dimensionless"},
+ options=["qubit", "iteration", "state"],
+ )
+
+ # iterate over the amplitude range
+ delta_amplitude_range = np.arange(
+ amplitude_factor_min, amplitude_factor_max, amplitude_factor_step
+ )
+
+ amplitude_sweeper = Sweeper(
+ Parameter.amplitude,
+ delta_amplitude_range,
+ pulses=[ro_pulses[qubit] for qubit in qubits],
+ )
+
+ # Execute sequences for both states
+ for state in [0, 1]:
+ results = platform.sweep(
+ sequences[state], amplitude_sweeper, nshots=nshots, average=False
+ )
+
+    # retrieve and store the results for every qubit
+ for qubit in qubits:
+ r = {k: v.ravel() for k, v in results[ro_pulses[qubit].serial].raw.items()}
+ r.update(
+ {
+ "amplitude[dimensionless]": np.repeat(
+ np.vstack(delta_amplitude_range).T,
+ nshots,
+ axis=0,
+ ).flatten()
+ * ro_pulses[qubit].amplitude,
+ "delta_amplitude[dimensionless]": np.repeat(
+ np.vstack(delta_amplitude_range).T,
+ nshots,
+ axis=0,
+ ).flatten(),
+ "qubit": [qubit] * nshots * len(delta_amplitude_range),
+ "iteration": np.repeat(
+ np.vstack(np.arange(nshots)).T,
+ len(delta_amplitude_range),
+ axis=1,
+ ).flatten(),
+ "state": [state] * nshots * len(delta_amplitude_range),
+ }
+ )
+ data.add_data_from_dict(r)
+
+ # finally, save the remaining data and the fits
+ yield data
+ yield ro_optimization_fit(data, "state", "qubit", "iteration", "delta_amplitude")
+
+
+@plot("TWPA frequency", plots.ro_frequency)
+def twpa_frequency(
+ platform: AbstractPlatform,
+ qubits: dict,
+ frequency_width: float,
+ frequency_step: float,
+ nshots: int,
+):
+ """
+ Method which optimizes the Read-out fidelity by varying the frequency of the TWPA.
+    Two analogous tests are performed for calibrating the ground state and the excited state of the oscillator.
+ The subscripts `exc` and `gnd` will represent the excited state |1> and the ground state |0>.
+ Their distinctiveness is then associated to the fidelity.
+
+ Args:
+ platform (:class:`qibolab.platforms.abstract.AbstractPlatform`): custom abstract platform on which we perform the calibration.
+ qubits (dict): List of target qubits to perform the action
+ frequency_width (float): Frequency range to sweep in Hz
+ frequency_step (float): Frequency step to sweep in Hz
+ nshots (int): number of times the pulse sequence will be repeated.
+ Returns:
+ A DataUnits object with the raw data obtained for the fast and precision sweeps with the following keys
+
+    - **MSR[V]**: Resonator signal voltage measurement in volts
+    - **i[V]**: Resonator signal voltage measurement for the component I in volts
+    - **q[V]**: Resonator signal voltage measurement for the component Q in volts
+    - **phase[rad]**: Resonator signal phase measurement in radians
+ - **qubit**: The qubit being tested
+ - **iteration**: Execution number
+ - **state**: The state of the qubit being tested
+ - **frequency**: The frequency of the TWPA being tested
+ - **delta_frequency**: The frequency offset from the runcard value
+
+ """
+
+ # reload instrument settings from runcard
+ platform.reload_settings()
+
+ # create two sequences of pulses:
+ # state0_sequence: I - MZ
+ # state1_sequence: RX - MZ
+
+ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel
+ state0_sequence = PulseSequence()
+ state1_sequence = PulseSequence()
+
+ RX_pulses = {}
+ ro_pulses = {}
+ initial_frequency = {}
+ for qubit in qubits:
+ RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0)
+ ro_pulses[qubit] = platform.create_qubit_readout_pulse(
+ qubit, start=RX_pulses[qubit].finish
+ )
+ initial_frequency[qubit] = platform.get_lo_twpa_frequency(qubit)
+
+ state0_sequence.add(ro_pulses[qubit])
+ state1_sequence.add(RX_pulses[qubit])
+ state1_sequence.add(ro_pulses[qubit])
+ sequences = {0: state0_sequence, 1: state1_sequence}
+ # create a DataUnits object to store the results
+ data = DataUnits(
+ name="data",
+ quantities={"frequency": "Hz", "delta_frequency": "Hz"},
+ options=["qubit", "iteration", "state"],
+ )
+
+ # iterate over the frequency range
+ delta_frequency_range = np.arange(
+ -frequency_width / 2, frequency_width / 2, frequency_step
+ ).astype(int)
+
+ # retrieve and store the results for every qubit
+ for frequency in delta_frequency_range:
+ for qubit in qubits:
+ platform.set_lo_twpa_frequency(qubit, initial_frequency[qubit] + frequency)
+
+ # Execute the sequences for both states
+ for state in [0, 1]:
+ results = platform.execute_pulse_sequence(sequences[state], nshots=nshots)
+ for qubit in qubits:
+ r = results[ro_pulses[qubit].serial].raw
+ r.update(
+ {
+ "frequency[Hz]": [platform.get_lo_twpa_frequency(qubit)]
+ * nshots,
+ "delta_frequency[Hz]": [frequency] * nshots,
+ "qubit": [qubit] * nshots,
+ "iteration": np.arange(nshots),
+ "state": [state] * nshots,
+ }
+ )
+ data.add_data_from_dict(r)
+
+ # finally, save the remaining data and the fits
+ yield data
+ yield ro_optimization_fit(
+ data, "delta_frequency", "state", "qubit", "iteration"
+ )
+
+
+@plot("TWPA power", plots.ro_power)
+def twpa_power(
+ platform: AbstractPlatform,
+ qubits: dict,
+ power_width: float,
+ power_step: float,
+ nshots: int,
+):
+ """
+ Method which optimizes the Read-out fidelity by varying the power of the TWPA.
+    Two analogous tests are performed for calibrating the ground state and the excited state of the oscillator.
+ The subscripts `exc` and `gnd` will represent the excited state |1> and the ground state |0>.
+ Their distinctiveness is then associated to the fidelity.
+
+ Args:
+ platform (:class:`qibolab.platforms.abstract.AbstractPlatform`): custom abstract platform on which we perform the calibration.
+ qubits (dict): List of target qubits to perform the action
+ power_width (float): width of the power range to be scanned in dBm
+ power_step (float): step of the power range to be scanned in dBm
+ nshots (int): number of times the pulse sequence will be repeated.
+ Returns:
+ A DataUnits object with the raw data obtained for the fast and precision sweeps with the following keys
+
+    - **MSR[V]**: Resonator signal voltage measurement in volts
+    - **i[V]**: Resonator signal voltage measurement for the component I in volts
+    - **q[V]**: Resonator signal voltage measurement for the component Q in volts
+    - **phase[rad]**: Resonator signal phase measurement in radians
+ - **qubit**: The qubit being tested
+ - **iteration**: Execution number
+ - **state**: The state of the qubit being tested
+ - **power**: The power of the TWPA being tested
+ - **delta_power**: The power offset from the runcard value
+
+ """
+
+ # reload instrument settings from runcard
+ platform.reload_settings()
+
+ # create two sequences of pulses:
+ # state0_sequence: I - MZ
+ # state1_sequence: RX - MZ
+
+ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel
+ state0_sequence = PulseSequence()
+ state1_sequence = PulseSequence()
+
+ RX_pulses = {}
+ ro_pulses = {}
+ initial_power = {}
+ for qubit in qubits:
+ RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0)
+ ro_pulses[qubit] = platform.create_qubit_readout_pulse(
+ qubit, start=RX_pulses[qubit].finish
+ )
+ initial_power[qubit] = platform.get_lo_twpa_power(qubit)
+
+ state0_sequence.add(ro_pulses[qubit])
+ state1_sequence.add(RX_pulses[qubit])
+ state1_sequence.add(ro_pulses[qubit])
+ sequences = {0: state0_sequence, 1: state1_sequence}
+ # create a DataUnits object to store the results
+ data = DataUnits(
+ name="data",
+ quantities={"power": "dBm", "delta_power": "dBm"},
+ options=["qubit", "iteration", "state"],
+ )
+
+ # iterate over the power range
+ delta_power_range = np.arange(-power_width / 2, power_width / 2, power_step)
+
+ # retrieve and store the results for every qubit
+ for power in delta_power_range:
+ for qubit in qubits:
+ platform.set_lo_twpa_power(qubit, initial_power[qubit] + power)
+
+ # Execute the sequences for both states
+ for state in [0, 1]:
+ results = platform.execute_pulse_sequence(sequences[state], nshots=nshots)
+ for qubit in qubits:
+ r = results[ro_pulses[qubit].serial].raw
+ r.update(
+ {
+ "power[dBm]": [platform.get_lo_twpa_power(qubit)] * nshots,
+ "delta_power[dBm]": [power] * nshots,
+ "qubit": [qubit] * nshots,
+ "iteration": np.arange(nshots),
+ "state": [state] * nshots,
+ }
+ )
+ data.add_data_from_dict(r)
+
+ # finally, save the remaining data and the fits
+ yield data
+ yield ro_optimization_fit(data, "delta_power", "state", "qubit", "iteration")
diff --git a/src/qibocal/calibrations/niGSC/XIdrb.py b/src/qibocal/calibrations/niGSC/XIdrb.py
index 4250f2992..89debef3f 100644
--- a/src/qibocal/calibrations/niGSC/XIdrb.py
+++ b/src/qibocal/calibrations/niGSC/XIdrb.py
@@ -74,7 +74,7 @@ def __init__(
def execute(self, circuit: Circuit, datarow: dict) -> dict:
datadict = super().execute(circuit, datarow)
datadict["depth"] = circuit.ngates - 1
- # TODO change to circuit.gate_types["x"] for next Qibo version
+ # TODO change that.
datadict["countX"] = len(circuit.gates_of_type("x"))
return datadict
diff --git a/src/qibocal/calibrations/niGSC/basics/noisemodels.py b/src/qibocal/calibrations/niGSC/basics/noisemodels.py
index 457092ca4..6bb8a74ec 100644
--- a/src/qibocal/calibrations/niGSC/basics/noisemodels.py
+++ b/src/qibocal/calibrations/niGSC/basics/noisemodels.py
@@ -34,6 +34,7 @@ def __init__(self, *args) -> None:
self.build(*params)
def build(self, *params):
+ # TODO for qibo v.0.1.14 change *params to list(zip(["X", "Y", "Z"], params))
# Add PauliError to gates.Gate
self.add(PauliError(*params))
diff --git a/src/qibocal/fitting/classifier/qblox_fit.py b/src/qibocal/fitting/classifier/qblox_fit.py
new file mode 100644
index 000000000..5fd001bb4
--- /dev/null
+++ b/src/qibocal/fitting/classifier/qblox_fit.py
@@ -0,0 +1,59 @@
+from dataclasses import dataclass
+
+import numpy as np
+import numpy.typing as npt
+
+from .utils import identity
+
+
+def constructor(_hyperparams):
+ r"""Return the model class.
+
+ Args:
+ _hyperparams: Model hyperparameters.
+ """
+ return QbloxFit()
+
+
+def hyperopt(_x_train, _y_train, _path):
+    r"""Perform a hyperparameter optimization and return the hyperparameters.
+
+ Args:
+ x_train: Training inputs.
+ y_train: Training outputs.
+ path (path): Model save path.
+
+ Returns:
+ Dictionary with model's hyperparameters.
+ """
+ return {}
+
+
+normalize = identity
+
+
+@dataclass
+class QbloxFit:
+ r"""This class deploys the Qblox qubit state classifier described
+ in the [documentation](https://qblox-qblox-instruments.readthedocs-hosted.com/en/master/tutorials/conditional_playback.html#Measure-qubit-histogram).
+
+ Args:
+ threshold (float): Classifier's threshold.
+ angle (float): Rotational angle.
+
+ """
+
+ threshold: float = 0.0
+ angle: float = 0.0
+
+ def fit(self, iq_coordinates, states: list):
+ state1 = [complex(*i) for i in iq_coordinates[(states == 1)]]
+ state0 = [complex(*i) for i in iq_coordinates[(states == 0)]]
+ self.angle = np.mod(-np.angle(np.mean(state1) - np.mean(state0)), 2 * np.pi)
+ self.threshold = (
+ np.exp(1j * self.angle) * (np.mean(state1) + np.mean(state0))
+ ).real / 2
+
+ def predict(self, inputs: npt.NDArray):
+ inputs = np.array([complex(*i) for i in inputs])
+ return ((np.exp(1j * self.angle) * inputs).real > self.threshold).astype(int)
diff --git a/src/qibocal/fitting/classifier/run.py b/src/qibocal/fitting/classifier/run.py
index c777b37c3..7f90d6dd6 100644
--- a/src/qibocal/fitting/classifier/run.py
+++ b/src/qibocal/fitting/classifier/run.py
@@ -22,6 +22,7 @@
"qubit_fit",
"random_forest",
"rbf_svm",
+ "qblox_fit",
]
diff --git a/src/qibocal/fitting/methods.py b/src/qibocal/fitting/methods.py
index e0a4a6360..71e79dc94 100644
--- a/src/qibocal/fitting/methods.py
+++ b/src/qibocal/fitting/methods.py
@@ -3,12 +3,15 @@
import lmfit
import numpy as np
+import pandas as pd
+import pint
from scipy.optimize import curve_fit
from qibocal.config import log
from qibocal.data import Data
from qibocal.fitting.utils import (
cos,
+ cumulative,
exp,
flipping,
freq_q_mathieu,
@@ -17,6 +20,7 @@
line,
lorenzian,
parse,
+ pint_to_float,
rabi,
ramsey,
)
@@ -1025,14 +1029,11 @@ def calibrate_qubit_states_fit(data, x, y, nshots, qubits, degree=True):
iq_mean_state1 = np.mean(iq_state1)
iq_mean_state0 = np.mean(iq_state0)
- origin = iq_mean_state0
- iq_state0_translated = iq_state0 - origin
- iq_state1_translated = iq_state1 - origin
- rotation_angle = np.angle(np.mean(iq_state1_translated))
+ rotation_angle = np.angle(iq_mean_state1 - iq_mean_state0)
- iq_state1_rotated = iq_state1_translated * np.exp(-1j * rotation_angle)
- iq_state0_rotated = iq_state0_translated * np.exp(-1j * rotation_angle)
+ iq_state1_rotated = iq_state1 * np.exp(-1j * rotation_angle)
+ iq_state0_rotated = iq_state0 * np.exp(-1j * rotation_angle)
real_values_state1 = iq_state1_rotated.real
real_values_state0 = iq_state0_rotated.real
@@ -1040,14 +1041,8 @@ def calibrate_qubit_states_fit(data, x, y, nshots, qubits, degree=True):
real_values_combined = np.concatenate((real_values_state1, real_values_state0))
real_values_combined.sort()
- cum_distribution_state1 = [
- sum(map(lambda x: x.real >= real_value, real_values_state1))
- for real_value in real_values_combined
- ]
- cum_distribution_state0 = [
- sum(map(lambda x: x.real >= real_value, real_values_state0))
- for real_value in real_values_combined
- ]
+ cum_distribution_state1 = cumulative(real_values_combined, real_values_state1)
+ cum_distribution_state0 = cumulative(real_values_combined, real_values_state0)
cum_distribution_diff = np.abs(
np.array(cum_distribution_state1) - np.array(cum_distribution_state0)
@@ -1060,7 +1055,7 @@ def calibrate_qubit_states_fit(data, x, y, nshots, qubits, degree=True):
assignment_fidelity = 1 - (errors_state1 + errors_state0) / nshots / 2
# assignment_fidelity = 1/2 + (cum_distribution_state1[argmax] - cum_distribution_state0[argmax])/nshots/2
if degree:
- rotation_angle = (rotation_angle * 360 / (2 * np.pi)) % 360
+ rotation_angle = (-rotation_angle * 360 / (2 * np.pi)) % 360
results = {
"rotation_angle": rotation_angle,
@@ -1073,3 +1068,128 @@ def calibrate_qubit_states_fit(data, x, y, nshots, qubits, degree=True):
}
parameters.add(results)
return parameters
+
+
+def ro_optimization_fit(data, *labels, debug=False):
+ """
+ Fit the fidelities from parameters swept as labels, and extract rotation angle and threshold
+
+ Args:
+ data (Data): data to fit
+ labels (str): variable used in the routine with format "variable_name"
+
+ Returns:
+ Data: data with the fit results
+ """
+ quantities = [
+ *labels,
+ "rotation_angle",
+ "threshold",
+ "fidelity",
+ "assignment_fidelity",
+ "average_state0",
+ "average_state1",
+ ]
+ data_fit = Data(
+ name="fit",
+ quantities=quantities,
+ )
+
+ # Create a ndarray for i and q shots for all labels
+ # shape=(i + j*q, qubit, state, label1, label2, ...)
+
+ shape = (*[len(data.df[label].unique()) for label in labels],)
+ nb_shots = len(data.df["iteration"].unique())
+
+ iq_complex = data.df["i"].pint.magnitude.to_numpy().reshape(shape) + 1j * data.df[
+ "q"
+ ].pint.magnitude.to_numpy().reshape(shape)
+
+ # Move state to 0, and iteration to -1
+ labels = list(labels)
+ iq_complex = np.moveaxis(
+ iq_complex, [labels.index("state"), labels.index("iteration")], [0, -1]
+ )
+ labels.remove("state")
+ labels.remove("iteration")
+ labels = ["state"] + labels + ["iteration"]
+
+ # Take the mean ground state
+ mean_gnd_state = np.mean(iq_complex[0, ...], axis=-1, keepdims=True)
+ mean_exc_state = np.mean(iq_complex[1, ...], axis=-1, keepdims=True)
+ angle = np.angle(mean_exc_state - mean_gnd_state)
+
+ # Rotate the data
+ iq_complex = iq_complex * np.exp(-1j * angle)
+
+ # Take the cumulative distribution of the real part of the data
+ iq_complex_sorted = np.sort(iq_complex.real, axis=-1)
+
+ def cum_dist(complex_row):
+ state0 = complex_row.real
+ state1 = complex_row.imag
+ combined = np.sort(np.concatenate((state0, state1)))
+
+ # Compute the indices where elements in state0 and state1 would be inserted in combined
+ idx_state0 = np.searchsorted(combined, state0, side="left")
+ idx_state1 = np.searchsorted(combined, state1, side="left")
+
+ # Create a combined histogram for state0 and state1
+ hist_combined = np.bincount(
+ idx_state0, minlength=len(combined)
+ ) + 1j * np.bincount(idx_state1, minlength=len(combined))
+
+ return hist_combined.cumsum()
+
+ cum_dist = (
+ np.apply_along_axis(
+ func1d=cum_dist,
+ axis=-1,
+ arr=iq_complex_sorted[0, ...] + 1j * iq_complex_sorted[1, ...],
+ )
+ / nb_shots
+ )
+
+ # Find the threshold for which the difference between the cumulative distribution of the two states is maximum
+ argmax = np.argmax(np.abs(cum_dist.real - cum_dist.imag), axis=-1, keepdims=True)
+
+ # Use np.take_along_axis to get the correct indices for the threshold calculation
+ threshold = np.take_along_axis(
+ np.concatenate((iq_complex_sorted[0, ...], iq_complex_sorted[1, ...]), axis=-1),
+ argmax,
+ axis=-1,
+ )
+
+ # Calculate the fidelity
+ fidelity = np.take_along_axis(
+ np.abs(cum_dist.real - cum_dist.imag), argmax, axis=-1
+ )
+ assignment_fidelity = (
+ 1
+ - (
+ 1
+ - np.take_along_axis(cum_dist.real, argmax, axis=-1)
+ + np.take_along_axis(cum_dist.imag, argmax, axis=-1)
+ )
+ / 2
+ )
+
+ # Add all the results to the data with labels as subnet without "state", "iteration"
+ data_fit.df = (
+ data.df.drop_duplicates(
+ subset=[i for i in labels if i not in ["state", "iteration"]]
+ )
+ .reset_index(drop=True)
+ .apply(pint_to_float)
+ )
+ data_fit.df["rotation_angle"] = angle.flatten()
+ data_fit.df["threshold"] = threshold.flatten()
+ data_fit.df["fidelity"] = fidelity.flatten()
+ data_fit.df["assignment_fidelity"] = assignment_fidelity.flatten()
+ data_fit.df["average_state0"] = mean_gnd_state.flatten()
+ data_fit.df["average_state1"] = mean_exc_state.flatten()
+
+ if debug:
+ return data_fit, cum_dist, iq_complex
+ else:
+ return data_fit
diff --git a/src/qibocal/fitting/utils.py b/src/qibocal/fitting/utils.py
index 4896f10e3..47768d7bf 100644
--- a/src/qibocal/fitting/utils.py
+++ b/src/qibocal/fitting/utils.py
@@ -1,6 +1,8 @@
import re
import numpy as np
+import pandas as pd
+import pint
from scipy.special import mathieu_a, mathieu_b
@@ -128,3 +130,29 @@ def freq_r_mathieu(x, p0, p1, p2, p3, p4, p5, p6, p7=0.499):
f_q = freq_q_mathieu(x, p2, p3, p4, p5, p6, p7)
f_r = p0 + p1**2 * np.sqrt(G) / (p0 - f_q)
return f_r
+
+
+def pint_to_float(x):
+ if isinstance(x, pd.Series):
+ return x.apply(pint_to_float)
+ elif isinstance(x, pint.Quantity):
+ return x.to(x.units).magnitude
+ else:
+ return x
+
+
+def cumulative(input_data, points):
+ r"""Evaluates in `input_data` the cumulative distribution
+ function of `points`.
+ WARNING: `input_data` and `points` should be sorted data.
+ """
+ input_data_sort = np.sort(input_data)
+ points_sort = np.sort(points)
+
+ prob = []
+ app = 0
+ for val in input_data_sort:
+ app += np.max(np.searchsorted(points_sort[app::], val), 0)
+ prob.append(app)
+
+ return np.array(prob)
diff --git a/src/qibocal/plots/__init__.py b/src/qibocal/plots/__init__.py
index 849767909..784ca139a 100644
--- a/src/qibocal/plots/__init__.py
+++ b/src/qibocal/plots/__init__.py
@@ -5,6 +5,7 @@
from qibocal.plots.flipping import *
from qibocal.plots.rabi import *
from qibocal.plots.ramsey import *
+from qibocal.plots.ro_optimization import *
from qibocal.plots.spectroscopies import *
from qibocal.plots.spin_echo import *
from qibocal.plots.t1 import *
diff --git a/src/qibocal/plots/ro_optimization.py b/src/qibocal/plots/ro_optimization.py
new file mode 100644
index 000000000..1438e7364
--- /dev/null
+++ b/src/qibocal/plots/ro_optimization.py
@@ -0,0 +1,518 @@
+import numpy as np
+import plotly.graph_objects as go
+
+from qibocal.data import Data, DataUnits
+from qibocal.plots.utils import get_color_state0, get_color_state1, get_data_subfolders
+
+
+# Plot RO optimization with frequency
+def ro_frequency(folder, routine, qubit, format):
+ fig = go.Figure()
+
+ # iterate over multiple data folders
+ subfolder = get_data_subfolders(folder)[0]
+ report_n = 0
+ fitting_report = ""
+
+ try:
+ data = DataUnits.load_data(folder, subfolder, routine, format, "data")
+ data.df = data.df[data.df["qubit"] == qubit]
+ except:
+ data = DataUnits(
+ name="data",
+ quantities={"frequency": "Hz", "delta_frequency": "Hz"},
+ options=["iteration", "state"],
+ )
+
+ try:
+ data_fit = Data.load_data(folder, subfolder, routine, format, "fit")
+ data_fit.df = data_fit.df[data_fit.df["qubit"] == qubit]
+
+ except:
+ data_fit = Data(
+ name="fit",
+ quantities=[
+ "frequency",
+ "delta_frequency",
+ "rotation_angle",
+ "threshold",
+ "fidelity",
+ "assignment_fidelity",
+ "average_state0",
+ "average_state1",
+ ],
+ )
+
+ # Plot raw results with sliders
+ for frequency in data.df["delta_frequency"].unique():
+ state0_data = data.df[
+ (data.df["delta_frequency"] == frequency) & (data.df["state"] == 0)
+ ]
+ state1_data = data.df[
+ (data.df["delta_frequency"] == frequency) & (data.df["state"] == 1)
+ ]
+ fit_data = data_fit.df[data_fit.df["delta_frequency"] == frequency.magnitude]
+ fit_data["average_state0"] = data_fit.df["average_state0"].apply(
+ lambda x: complex(x)
+ )
+ fit_data["average_state1"] = data_fit.df["average_state1"].apply(
+ lambda x: complex(x)
+ )
+
+ # print(fit_data)
+ fig.add_trace(
+ go.Scatter(
+ x=state0_data["i"].pint.to("V").pint.magnitude,
+ y=state0_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 0",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state0(report_n)),
+ visible=False,
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=state1_data["i"].pint.to("V").pint.magnitude,
+ y=state1_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 1",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state1(report_n)),
+ visible=False,
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state0"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state0"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"q{qubit}/r{report_n}: mean state 0",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state0(report_n)),
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state1"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state1"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"avg q{qubit}/r{report_n}: mean state 1",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state1(report_n)),
+ ),
+ )
+
+ # Show data for the first frequency
+ for i in range(4):
+ fig.data[i].visible = True
+
+ # Add slider
+ steps = []
+ for i, freq in enumerate(data.df["frequency"].unique()):
+ step = dict(
+ method="update",
+ args=[
+ {"visible": [False] * len(fig.data)},
+ ],
+ label=f"{freq:.6f}",
+ )
+ for j in range(4):
+ step["args"][0]["visible"][i * 4 + j] = True
+ steps.append(step)
+
+ sliders = [
+ dict(
+ currentvalue={"prefix": "frequency: "},
+ steps=steps,
+ )
+ ]
+
+ fig.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="i (V)",
+ yaxis_title="q (V)",
+ sliders=sliders,
+ title=f"q{qubit}",
+ )
+ fig.update_yaxes(
+ scaleanchor="x",
+ scaleratio=1,
+ )
+
+ # Plot the fidelity as a function of frequency
+ fig_fidelity = go.Figure()
+
+ fig_fidelity.add_trace(
+ go.Scatter(x=data_fit.df["frequency"], y=data_fit.df["assignment_fidelity"])
+ )
+ fig_fidelity.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="delta frequency (Hz)",
+ yaxis_title="assignment fidelity (ratio)",
+ title=f"q{qubit}",
+ )
+ # Add fitting report for the best fidelity
+ fit_data = data_fit.df[data_fit.df["fidelity"] == data_fit.df["fidelity"].max()]
+ title_text = f"q{qubit}/r{report_n} | average state 0: ({complex(fit_data['average_state0'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | average state 1: ({complex(fit_data['average_state1'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | rotation angle: {float(fit_data['rotation_angle'].to_numpy()[0]):.3f} | threshold = {float(fit_data['threshold'].to_numpy()[0]):.6f}
"
+ title_text += f"q{qubit}/r{report_n} | fidelity: {float(fit_data['fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | assignment fidelity: {float(fit_data['assignment_fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | optimal frequency: {float(fit_data['frequency'].to_numpy()[0]):.3f} Hz
"
+ fitting_report = fitting_report + title_text
+ return [fig, fig_fidelity], fitting_report
+
+
+# Plot RO optimization with amplitude
+def ro_amplitude(folder, routine, qubit, format):
+ fig = go.Figure()
+
+ # iterate over multiple data folders
+ subfolder = get_data_subfolders(folder)[0]
+ report_n = 0
+ fitting_report = ""
+
+ try:
+ data = DataUnits.load_data(folder, subfolder, routine, format, "data")
+ data.df = data.df[data.df["qubit"] == qubit]
+ except:
+ data = DataUnits(
+ name="data",
+ quantities={
+ "amplitude": "dimensionless",
+ "delta_amplitude": "dimensionless",
+ },
+ options=["iteration", "state"],
+ )
+
+ try:
+ data_fit = Data.load_data(folder, subfolder, routine, format, "fit")
+ data_fit.df = data_fit.df[data_fit.df["qubit"] == qubit]
+
+ except:
+ data_fit = Data(
+ name="fit",
+ quantities=[
+ "amplitude",
+ "delta_amplitude",
+ "rotation_angle",
+ "threshold",
+ "fidelity",
+ "assignment_fidelity",
+ "average_state0",
+ "average_state1",
+ ],
+ )
+
+ # Plot raw results with sliders
+ for amplitude in data.df["delta_amplitude"].unique():
+ state0_data = data.df[
+ (data.df["delta_amplitude"] == amplitude) & (data.df["state"] == 0)
+ ]
+ state1_data = data.df[
+ (data.df["delta_amplitude"] == amplitude) & (data.df["state"] == 1)
+ ]
+ fit_data = data_fit.df[data_fit.df["delta_amplitude"] == amplitude.magnitude]
+ fit_data["average_state0"] = data_fit.df["average_state0"].apply(
+ lambda x: complex(x)
+ )
+ fit_data["average_state1"] = data_fit.df["average_state1"].apply(
+ lambda x: complex(x)
+ )
+
+ # print(fit_data)
+ fig.add_trace(
+ go.Scatter(
+ x=state0_data["i"].pint.to("V").pint.magnitude,
+ y=state0_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 0",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state0(report_n)),
+ visible=False,
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=state1_data["i"].pint.to("V").pint.magnitude,
+ y=state1_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 1",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state1(report_n)),
+ visible=False,
+ ),
+ )
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state0"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state0"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"q{qubit}/r{report_n}: mean state 0",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state0(report_n)),
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state1"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state1"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"avg q{qubit}/r{report_n}: mean state 1",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state1(report_n)),
+ ),
+ )
+
+ report_n += 1
+
+ # Show data for the first amplitude
+ for i in range(4):
+ fig.data[i].visible = True
+
+ # Add slider
+ steps = []
+ for i, amp in enumerate(data.df["amplitude"].unique()):
+ step = dict(
+ method="update",
+ args=[
+ {"visible": [False] * len(fig.data)},
+ ],
+ label=f"{amp.magnitude:.4f}",
+ )
+ for j in range(4):
+ step["args"][0]["visible"][i * 4 + j] = True
+ steps.append(step)
+
+ sliders = [
+ dict(
+ currentvalue={"prefix": "amplitude: "},
+ steps=steps,
+ )
+ ]
+
+ fig.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="i (V)",
+ yaxis_title="q (V)",
+ sliders=sliders,
+ title=f"q{qubit}",
+ )
+ fig.update_yaxes(
+ scaleanchor="x",
+ scaleratio=1,
+ )
+
+ # Plot the fidelity as a function of amplitude
+ fig_fidelity = go.Figure()
+
+ fig_fidelity.add_trace(
+ go.Scatter(x=data_fit.df["amplitude"], y=data_fit.df["assignment_fidelity"])
+ )
+ fig_fidelity.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="delta amplitude (dimensionless)",
+ yaxis_title="assignment fidelity (ratio)",
+ title=f"q{qubit}",
+ )
+ # Add fitting report for the best fidelity
+ fit_data = data_fit.df[data_fit.df["fidelity"] == data_fit.df["fidelity"].max()]
+ title_text = f"q{qubit}/r{report_n} | average state 0: ({complex(fit_data['average_state0'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | average state 1: ({complex(fit_data['average_state1'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | rotation angle: {float(fit_data['rotation_angle'].to_numpy()[0]):.3f} | threshold = {float(fit_data['threshold'].to_numpy()[0]):.6f}
"
+ title_text += f"q{qubit}/r{report_n} | fidelity: {float(fit_data['fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | assignment fidelity: {float(fit_data['assignment_fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | optimal amplitude: {float(fit_data['amplitude'].to_numpy()[0]):.3f}
"
+ fitting_report = fitting_report + title_text
+ return [fig, fig_fidelity], fitting_report
+
+
+# Plot RO optimization with power
+def ro_power(folder, routine, qubit, format):
+ fig = go.Figure()
+
+ # iterate over multiple data folders
+ subfolder = get_data_subfolders(folder)[0]
+ report_n = 0
+ fitting_report = ""
+
+ try:
+ data = DataUnits.load_data(folder, subfolder, routine, format, "data")
+ data.df = data.df[data.df["qubit"] == qubit]
+ except:
+ data = DataUnits(
+ name="data",
+ quantities={
+ "power": "dBm",
+ "delta_power": "dBm",
+ },
+ options=["iteration", "state"],
+ )
+
+ try:
+ data_fit = Data.load_data(folder, subfolder, routine, format, "fit")
+ data_fit.df = data_fit.df[data_fit.df["qubit"] == qubit]
+
+ except:
+ data_fit = Data(
+ name="fit",
+ quantities=[
+ "power",
+ "delta_power",
+ "rotation_angle",
+ "threshold",
+ "fidelity",
+ "assignment_fidelity",
+ "average_state0",
+ "average_state1",
+ ],
+ )
+
+ # Plot raw results with sliders
+ for power in data.df["delta_power"].unique():
+ state0_data = data.df[
+ (data.df["delta_power"] == power) & (data.df["state"] == 0)
+ ]
+ state1_data = data.df[
+ (data.df["delta_power"] == power) & (data.df["state"] == 1)
+ ]
+ fit_data = data_fit.df[data_fit.df["delta_power"] == power.magnitude]
+ fit_data["average_state0"] = data_fit.df["average_state0"].apply(
+ lambda x: complex(x)
+ )
+ fit_data["average_state1"] = data_fit.df["average_state1"].apply(
+ lambda x: complex(x)
+ )
+
+ # print(fit_data)
+ fig.add_trace(
+ go.Scatter(
+ x=state0_data["i"].pint.to("V").pint.magnitude,
+ y=state0_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 0",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state0(report_n)),
+ visible=False,
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=state1_data["i"].pint.to("V").pint.magnitude,
+ y=state1_data["q"].pint.to("V").pint.magnitude,
+ name=f"q{qubit}/r{report_n}: state 1",
+ mode="markers",
+ showlegend=True,
+ opacity=0.7,
+ marker=dict(size=3, color=get_color_state1(report_n)),
+ visible=False,
+ ),
+ )
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state0"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state0"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"q{qubit}/r{report_n}: mean state 0",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state0(report_n)),
+ ),
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=fit_data["average_state1"].apply(lambda x: np.real(x)).to_numpy(),
+ y=fit_data["average_state1"].apply(lambda x: np.imag(x)).to_numpy(),
+ name=f"avg q{qubit}/r{report_n}: mean state 1",
+ showlegend=True,
+ visible=False,
+ mode="markers",
+ marker=dict(size=10, color=get_color_state1(report_n)),
+ ),
+ )
+
+ report_n += 1
+
+ # Show data for the first power
+ for i in range(4):
+ fig.data[i].visible = True
+
+ # Add slider
+ steps = []
+ for i, amp in enumerate(data.df["power"].unique()):
+ step = dict(
+ method="update",
+ args=[
+ {"visible": [False] * len(fig.data)},
+ ],
+ label=f"{amp.magnitude:.4f}",
+ )
+ for j in range(4):
+ step["args"][0]["visible"][i * 4 + j] = True
+ steps.append(step)
+
+ sliders = [
+ dict(
+ currentvalue={"prefix": "power: "},
+ steps=steps,
+ )
+ ]
+
+ fig.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="i (V)",
+ yaxis_title="q (V)",
+ sliders=sliders,
+ title=f"q{qubit}",
+ )
+ fig.update_yaxes(
+ scaleanchor="x",
+ scaleratio=1,
+ )
+
+ # Plot the fidelity as a function of power
+ fig_fidelity = go.Figure()
+
+ fig_fidelity.add_trace(
+ go.Scatter(x=data_fit.df["power"], y=data_fit.df["assignment_fidelity"])
+ )
+ fig_fidelity.update_layout(
+ showlegend=True,
+ uirevision="0", # ``uirevision`` allows zooming while live plotting
+ xaxis_title="delta power (dBm)",
+ yaxis_title="assignment fidelity (ratio)",
+ title=f"q{qubit}",
+ )
+ # Add fitting report for the best fidelity
+ fit_data = data_fit.df[data_fit.df["fidelity"] == data_fit.df["fidelity"].max()]
+ title_text = f"q{qubit}/r{report_n} | average state 0: ({complex(fit_data['average_state0'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | average state 1: ({complex(fit_data['average_state1'].to_numpy()[0]):.6f})
"
+ title_text += f"q{qubit}/r{report_n} | rotation angle: {float(fit_data['rotation_angle'].to_numpy()[0]):.3f} | threshold = {float(fit_data['threshold'].to_numpy()[0]):.6f}
"
+ title_text += f"q{qubit}/r{report_n} | fidelity: {float(fit_data['fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | assignment fidelity: {float(fit_data['assignment_fidelity'].to_numpy()[0]):.3f}
"
+ title_text += f"q{qubit}/r{report_n} | optimal power: {float(fit_data['power'].to_numpy()[0]):.3f} dBm
"
+ fitting_report = fitting_report + title_text
+ return [fig, fig_fidelity], fitting_report
diff --git a/tests/test_classifier.py b/tests/test_classifier.py
index 6fc47b179..fb3a8a4c7 100644
--- a/tests/test_classifier.py
+++ b/tests/test_classifier.py
@@ -47,7 +47,7 @@ def initialization(data_path):
qubits = [0, 1]
for qubit in qubits:
qubit_dir = qubit_path(base_dir, qubit)
- classifiers = ["linear_svm"]
+ classifiers = ["linear_svm", "qblox_fit"]
table, y_test, _x_test = run.train_qubit(
data_path, base_dir, qubit=qubit, classifiers=classifiers
)
diff --git a/tests/test_fitting_methods.py b/tests/test_fitting_methods.py
index a97aa93f2..66010ce50 100644
--- a/tests/test_fitting_methods.py
+++ b/tests/test_fitting_methods.py
@@ -14,6 +14,7 @@
)
from qibocal.fitting.utils import (
cos,
+ cumulative,
exp,
flipping,
freq_r_mathieu,
@@ -531,3 +532,9 @@ def test_drag_tunning_fit(label, caplog):
data, "beta_param[dimensionless]", "MSR[V]", [0], labels=label
)
assert "drag_tuning_fit: the fitting was not succesful" in caplog.text
+
+
+def test_cumulative():
+ points = x = np.linspace(0, 9, 10)
+ cum = cumulative(x, points)
+ assert np.array_equal(cum, points)
diff --git a/tests/test_niGSC_simulfilteredrb.py b/tests/test_niGSC_simulfilteredrb.py
index 8d89567e4..5a0b358df 100644
--- a/tests/test_niGSC_simulfilteredrb.py
+++ b/tests/test_niGSC_simulfilteredrb.py
@@ -177,7 +177,7 @@ def test_post_processing(
def test_build_report():
- depths = [1, 5, 10, 15, 20, 25]
+ depths = [1, 5, 10, 15]
nshots = 128
runs = 10
nqubits = 1
diff --git a/tests/test_niGSC_standardrb.py b/tests/test_niGSC_standardrb.py
index 1e3b8c92d..371f8689c 100644
--- a/tests/test_niGSC_standardrb.py
+++ b/tests/test_niGSC_standardrb.py
@@ -166,7 +166,7 @@ def test_post_processing(nqubits: int, depths: list, runs: int, nshots: int):
def test_build_report():
- depths = [1, 5, 10, 15, 20, 25]
+ depths = [1, 5, 10]
nshots = 1024
runs = 5
nqubits = 1