diff --git a/.gitignore b/.gitignore index 06a30c8bf..97167ada7 100644 --- a/.gitignore +++ b/.gitignore @@ -133,3 +133,7 @@ dmypy.json # QCVV output directories 20* + +# Visual Studio Code +.vscode +*.DS_Store diff --git a/doc/source/api-reference/qibocal.rst b/doc/source/api-reference/qibocal.rst index 9ce84f075..79934b7e0 100644 --- a/doc/source/api-reference/qibocal.rst +++ b/doc/source/api-reference/qibocal.rst @@ -128,6 +128,7 @@ and `csv `_. :members: :member-order: bysource + Fitting functions ----------------- @@ -138,3 +139,62 @@ Fitting functions :members: :undoc-members: :show-inheritance: + +Gate set characterization +------------------------- + +.. _abstract-module-label: + +Abstract and Basic methods +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: qibocal.calibrations.niGSC.basics.circuitfactory + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: qibocal.calibrations.niGSC.basics.experiment + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: qibocal.calibrations.niGSC.basics.fitting + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: qibocal.calibrations.niGSC.basics.plot + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: qibocal.calibrations.niGSC.basics.utils + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: qibocal.calibrations.niGSC.basics.noisemodels + :members: + :undoc-members: + :show-inheritance: + +Standard RB +^^^^^^^^^^^ +.. automodule:: qibocal.calibrations.niGSC.standardrb + :members: + :undoc-members: + :show-inheritance: + +Simultaneous Filtered RB +^^^^^^^^^^^^^^^^^^^^^^^^ +.. automodule:: qibocal.calibrations.niGSC.simulfilteredrb + :members: + :undoc-members: + :show-inheritance: + +X-ID RB +^^^^^^^ +.. 
automodule:: qibocal.calibrations.niGSC.XIdrb + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/conf.py b/doc/source/conf.py index 05b2b3789..7cf6b46ec 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,6 +45,7 @@ "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", + "sphinx.ext.autosectionlabel", ] # Add any paths that contain templates here, relative to this directory. diff --git a/doc/source/getting-started/index.rst b/doc/source/getting-started/index.rst index 07c6b730c..fa8d65aae 100644 --- a/doc/source/getting-started/index.rst +++ b/doc/source/getting-started/index.rst @@ -11,5 +11,6 @@ your quantum hardware. installation interface runcard + niGSC/niGSC_and_standardrb resonator_sample/resonator_spectroscopy_sample example diff --git a/doc/source/getting-started/niGSC/images/Example_standardRB_report.png b/doc/source/getting-started/niGSC/images/Example_standardRB_report.png new file mode 100644 index 000000000..4d44e6777 Binary files /dev/null and b/doc/source/getting-started/niGSC/images/Example_standardRB_report.png differ diff --git a/doc/source/getting-started/niGSC/images/Example_standardRBerror_report.png b/doc/source/getting-started/niGSC/images/Example_standardRBerror_report.png new file mode 100644 index 000000000..6df816548 Binary files /dev/null and b/doc/source/getting-started/niGSC/images/Example_standardRBerror_report.png differ diff --git a/doc/source/getting-started/niGSC/images/StandardRB_error.png b/doc/source/getting-started/niGSC/images/StandardRB_error.png new file mode 100644 index 000000000..c915ffdd7 Binary files /dev/null and b/doc/source/getting-started/niGSC/images/StandardRB_error.png differ diff --git a/doc/source/getting-started/niGSC/images/StandardRB_noerror.png b/doc/source/getting-started/niGSC/images/StandardRB_noerror.png new file mode 100644 index 000000000..a94386756 Binary files /dev/null and b/doc/source/getting-started/niGSC/images/StandardRB_noerror.png differ 
diff --git a/doc/source/getting-started/niGSC/images/qibocal_niGSC.png b/doc/source/getting-started/niGSC/images/qibocal_niGSC.png new file mode 100644 index 000000000..3dd8a3d49 Binary files /dev/null and b/doc/source/getting-started/niGSC/images/qibocal_niGSC.png differ diff --git a/doc/source/getting-started/niGSC/niGSC_and_standardrb.rst b/doc/source/getting-started/niGSC/niGSC_and_standardrb.rst new file mode 100644 index 000000000..7b3b97be8 --- /dev/null +++ b/doc/source/getting-started/niGSC/niGSC_and_standardrb.rst @@ -0,0 +1,196 @@ + +========================================= +Non-Interactive Gate Set Characterization +========================================= + +``qibocal`` offers a seamless and efficient solution for implementing non-interactive Gate Set Characterization (niGSC) protocols. +An abstract level explanation of the architecture is depicted in the figure below. + + +.. image:: images/qibocal_niGSC.png + :width: 700 + :alt: Diagram explaining how non-interactive gate set characterization works on an abstract level. + + +In ``abstract.py``, see section :ref:`abstract-module-label` of the APIs documentation, +the overall structure is set up. +The foundation is three classes: + + 1. The circuit factory, an iterator which produces circuits when called. + 2. The Experiment class which takes an iterable object producing circuits, optional some parameters. It is able to execute the circuits and to overwrite/store/process the necessary data. + 3. A class storing and displaying the results of a gate set characterization. + +A gate set characterization protocol will be demonstrated with a standard randomized benchmarking scheme. + +Standard Randomized Benchmarking +================================ + +In general, the idea of a standard RB protocol is to quantify the performance of a quantum device +regarding the implementation of gates. 
+For that the quantum device needs to be able to prepare a quantum state, modify this quantum state with gates, +and in the end measure the outcome of the modulation of the quantum state. +The survival probability of the ground state :math:`y(m)` dependent on the sequence length +of the applied gates :math:`m`, is modulated with the following exponential function: + +.. math:: + y_{\text{fit}}(m) = Ap^m+B, + +where :math:`A, B` are SPAM (state preparation and measurement)-error constants absorbing the state preparation and measurement error. +The base of the exponent :math:`p` is used to calculate the average gate fidelity :math:`F` as + +.. math:: + F = 1-\frac{d-1}{d}(1-p), + +which in the end is the reported value for the quantum device which is being benchmarked. + +In the Figure below on the left a Block sphere is shown with two gates (in green) acting on the initial state +depicted as a grey bullet on top of the sphere. A final inverse gate (in blue) redoes the sequence and +in an ideal case should bring the state back to the inital state. +Standard RB normally is performed with Clifford gates :math:`\mathcal{C}`. +On the very right a schematic explanation of different sequence lengths is shown. +The triangles stand for state preparation, the green boxes for Clifford gates, the blue boxes for +the inverse gate and the boxes to the very left in the scheme are measurements. +Between the sphere and the boxes a sketch of how the survival probabilities should behave +is shown. Since there are no error gates the survival probability stays at one. + + +.. image:: images/StandardRB_noerror.png + :width: 700 + :alt: Error less Bloch sphere next to an image depicting the survival probability vs sequence length next to an image of state preparation + unitary manipulation + measurements + +A gate independent error :math:`\Lambda` acting with every applied gate on the quantum state +is a good first approximation as error model. 
+There are more general models, they will be explored in a different tutorial. +In the Figure below general depolarizing channels are linked between the Clifford gates making +the signal drop exponentially. +The above defined fitting function can then be used to calculate the average gate fidelity. + +.. image:: images/StandardRB_error.png + :width: 700 + :alt: Error Bloch sphere next to an image depicting the survival probability vs sequence length next to an image of state preparation + unitary manipulation + measurements + + +Qibocal Implementation +^^^^^^^^^^^^^^^^^^^^^^ + +First define the necessary variables which will be used when initiating the +circuit factory and the experiment object. + +.. code-block:: python + + # Define the necessary variables. + nqubits = 1 # Number of qubits in the quantum hardware. + depths = [0,1,5] # How many random gates there are in each circuit. + runs = 2 # The amount of repetitions of the whole experiment. + nshots = 5 # When a circuit is executed how many shots are used. + +The circuit factory +""""""""""""""""""" + +Now build the circuit factory, and check out how it works. + +.. code-block:: python + + from qibocal.calibrations.niGSC import standardrb + # To not alter the iterator when using it, make deep copies. + from copy import deepcopy + factory = standardrb.ModuleFactory(nqubits, depths, runs) + # ``factory`` is an iterator class object generating single clifford + # gates with the last gate always the inverse of the whole gate sequence. + # There are mainly three ways how to extract the circuits. + # 1. Make a list out of the iterator object. + circuits_list1 = list(deepcopy(factory)) + # 2. Use a for loop. + circuits_list2 = [] + for circuit in deepcopy(factory): + circuits_list2.append(circuit) + # 3. Make an iterator and extract the circuits with the next method. 
+ iter_factory = iter(deepcopy(factory)) + circuits_list3, iterate = [], True + while iterate: + try: + circuits_list3.append(next(iter_factory)) + except StopIteration: + iterate = False + # All the three lists have circuits constructed with + # single clifford gates according to the ``depths``list, + # repeated ``runs``many times. + +The experiment +"""""""""""""" + +.. code-block:: python + + # Initiate the standard RB experiment. To make it simpler + # first without simulated noise on the circuits. + experiment = standardrb.ModuleExperiment(factory, nshots) + # Nothing happened yet. The experiment has to be executed + # to execute the single circuits and store the samples along + # with the number of applied gates. + experiment.perform(experiment.execute) + # Check out the data in a data frame. Since there is no noise all + # the samples from the measured qubits were in the ground state. + print(experiment.dataframe) + # samples depth + # 0 [[0], [0], [0], [0], [0]] 0 + # 1 [[0], [0], [0], [0], [0]] 1 + # 2 [[0], [0], [0], [0], [0]] 5 + # 3 [[0], [0], [0], [0], [0]] 0 + # 4 [[0], [0], [0], [0], [0]] 1 + # 5 [[0], [0], [0], [0], [0]] 5 + +The postprocessing +"""""""""""""""""" + +The standard randomized benchmarking protocol aims at analyzing the probability +of the state coming back to the inital state when inversing all the gates applied gates. +Since normally the initial state is the grounds state :math:`\ket{0}` the survival +of the ground state probability has to be analyzed. +And with analyzed it is meant to extract the probabilities for every sequence (or depth) +of each run, average over the runs, fit an exponential decay to the signal and use the +base of the exponent to calculate the average gate fidelity. + +.. code-block:: python + + # Make the experiment calculate its own ground state probability, + # it will be appended to the data. 
+ standardrb.post_processing_sequential(experiment) + # Now the data attribute of the experiment object has all its needs + # for the desired signal (ground state survival probability) to + # be fitted and plotted. It only has to be aggregated. + df_aggr = standardrb.get_aggregational_data(experiment) + # The build_report function knows how to plot the aggregated data along + # with the sequential data and returns the report figure. + fig = standardrb.build_report(experiment, df_aggr) + fig.show() + +.. image:: images/Example_standardRB_report.png + :width: 600 + :alt: Screenshot of Report when executing the code from above + +When simulating the standard RB scheme noise can be added using the qibo.noise module. +It has to be predefined and passed when initiating the experiment object. + +.. code-block:: python + + from qibocal.calibrations.niGSC import standardrb + from qibocal.calibrations.niGSC.basics.noisemodels import PauliErrorOnUnitary + nqubits = 1 + depths = [0,1,5,10,15] + runs = 10 + nshots = 128 + # Define the noise model used in the simulation. + noisemodel = PauliErrorOnUnitary(0.01, 0.02, 0.04) + factory = standardrb.ModuleFactory(nqubits, depths, runs) + # Add the noise model to the experiment. + experiment = standardrb.ModuleExperiment( + factory, nshots, noise_model = noisemodel) + experiment.perform(experiment.execute) + experiment.perform(standardrb.groundstate_probabilities) + df_aggr = standardrb.get_aggregational_data(experiment) + fig = standardrb.build_report(experiment, df_aggr) + fig.show() + +.. 
image:: images/Example_standardRBerror_report.png + :width: 600 + :alt: Screenshot of Report when executing the code from above diff --git a/doc/source/getting-started/resonator_sample/resonator_spectroscopy_sample.rst b/doc/source/getting-started/resonator_sample/resonator_spectroscopy_sample.rst index 41fd86783..8dbc7a98a 100644 --- a/doc/source/getting-started/resonator_sample/resonator_spectroscopy_sample.rst +++ b/doc/source/getting-started/resonator_sample/resonator_spectroscopy_sample.rst @@ -44,7 +44,7 @@ feature greatly helps in designing the span list. Example showcased in Hz, for a **small_spans**: list of spans for the small scans that are run when the feature is located. Scans of 10 equaly distributes points will be executed. This will directly correlate to the final desired precision of the feature. Example showcased in Hz, - for a resonator punchout. +for a resonator punchout. **resolution**: precision in the sampled space for the gaussianly distributed samples. In the initial part of the algorithm, this value will be the precision used to detect the feature. 
This value needs to be small enough for the feature to be diff --git a/runcards/niGSC.yml b/runcards/niGSC.yml new file mode 100644 index 000000000..ec0525fb4 --- /dev/null +++ b/runcards/niGSC.yml @@ -0,0 +1,28 @@ +backend: numpy + +qubits: [0] + +format: pickle + +actions: + standardrb: + nqubits: 1 + depths: [1,3,5,7,10] + runs: 2 + nshots: 1024 + noise_model: PauliErrorOnUnitary + noise_params: [0.01, 0.01, 0.01] + XIdrb: + nqubits: 1 + depths: [1,2,3,4,5,6,7,8,9,10] + runs: 5 + nshots: 10 + noise_model: PauliErrorOnX + noise_params: [0.05, 0.01, 0.01] + simulfilteredrb: + nqubits: 1 + depths: [1,3,5,7,10] + runs: 2 + nshots: 1024 + noise_model: PauliErrorOnUnitary + noise_params: [0.01, 0.01, 0.01] diff --git a/src/qibocal/calibrations/__init__.py b/src/qibocal/calibrations/__init__.py index a580b8e19..b559b8154 100644 --- a/src/qibocal/calibrations/__init__.py +++ b/src/qibocal/calibrations/__init__.py @@ -8,4 +8,3 @@ from qibocal.calibrations.characterization.resonator_spectroscopy_sample import * from qibocal.calibrations.characterization.spin_echo import * from qibocal.calibrations.characterization.t1 import * -from qibocal.calibrations.protocols.test import * diff --git a/src/qibocal/calibrations/niGSC/XIdrb.py b/src/qibocal/calibrations/niGSC/XIdrb.py new file mode 100644 index 000000000..0ccdfb1db --- /dev/null +++ b/src/qibocal/calibrations/niGSC/XIdrb.py @@ -0,0 +1,212 @@ +""" This script implements an easy 1-qubit protocol with only X-gates and identities. +It is a great example on how to write an own niGSC protocol. The single functions above have +little descriptions for the purpose of that function and what is important to include. + +1. Step: + Define the two module specific classes which are used in defining and executing an experiment, + the circuit factory and experiment class. + They can also just inherit everything from another module. +2. Step: + Write the sequential post processing functions. +3. 
Step: + Write the aggregational post processing function. +4. Step: + Write the function to build a report. When using the qq module, a plotly figure has to be returned. +""" + +# These libraries should be enough when starting a new protocol. +from __future__ import annotations + +from collections.abc import Iterable + +import numpy as np +import pandas as pd +from plotly.graph_objects import Figure +from qibo import gates +from qibo.models import Circuit +from qibo.noise import NoiseModel + +import qibocal.calibrations.niGSC.basics.fitting as fitting_methods +from qibocal.calibrations.niGSC.basics.circuitfactory import CircuitFactory +from qibocal.calibrations.niGSC.basics.experiment import Experiment +from qibocal.calibrations.niGSC.basics.plot import Report, scatter_fit_fig +from qibocal.config import raise_error + + +# Define the circuit factory class for this specific module. +class ModuleFactory(CircuitFactory): + def __init__(self, nqubits: int, depths: list, qubits: list = []) -> None: + super().__init__(nqubits, depths, qubits) + if not len(self.qubits) == 1: + raise_error( + ValueError, + "This class is written for gates acting on only one qubit, not {} qubits.".format( + len(self.qubits) + ), + ) + self.name = "XId" + + def build_circuit(self, depth: int): + # Initiate the empty circuit from qibo with 'self.nqubits' + # many qubits. + circuit = Circuit(1, density_matrix=True) + # There are only two gates to choose from for every qubit. + a = [gates.I(0), gates.X(0)] + # Draw sequence length many zeros and ones. + random_ints = np.random.randint(0, 2, size=depth) + # Get the Xs and Ids with random_ints as indices. + gate_lists = np.take(a, random_ints) + # Add gates to circuit. + circuit.add(gate_lists) + circuit.add(gates.M(0)) + return circuit + + +# Define the experiment class for this specific module. 
+class ModuleExperiment(Experiment): + def __init__( + self, + circuitfactory: Iterable, + data: Iterable | None = None, + nshots: int | None = None, + noise_model: NoiseModel = None, + ) -> None: + super().__init__(circuitfactory, data, nshots, noise_model) + self.name = "XIdRB" + + def execute(self, circuit: Circuit, datarow: dict) -> dict: + datadict = super().execute(circuit, datarow) + datadict["depth"] = circuit.ngates - 1 + # TODO change that. + datadict["countX"] = circuit.gate_types["x"] + return datadict + + +# Define the result class for this specific module. +class moduleReport(Report): + def __init__(self) -> None: + super().__init__() + self.title = "X-Id Benchmarking" + + +# The filter functions/post processing functions always dependent on circuit and data row! +# It is executed row by row when used on an experiment object. +def filter_sign(circuit: Circuit, datarow: dict) -> dict: + """Calculates the filtered signal for the XId. + + :math:`n_X` denotes the amount of :math:`X` gates in the circuit with gates + :math:`g` and :math`i` the outcome which is either ground state :math:`0` + or exited state :math:`1`. + + .. math:: + f_{\\text{sign}}(i,g) + = (-1)^{n_X\\%2 + i}/2 + + + Args: + circuit (Circuit): Not needed here. + datarow (dict): The dictionary with the samples from executed circuits and amount of + X gates in the executed circuit. + + Returns: + dict: _description_ + """ + samples = datarow["samples"] + countX = datarow["countX"] + filtersign = 0 + for s in samples: + filtersign += (-1) ** (countX % 2 + s[0]) / 2.0 + datarow["filter"] = filtersign / len(samples) + return datarow + + +# All the predefined sequential postprocessing / filter functions are bundled together here. +def post_processing_sequential(experiment: Experiment): + """Perform sequential tasks needed to analyze the experiment results. + + The data is added/changed in the experiment, nothing has to be returned. 
+ + Args: + experiment (Experiment): Experiment object after execution of the experiment itself. + """ + + # Compute and add the ground state probabilities row by row. + experiment.perform(filter_sign) + + +# After the row by row execution of tasks comes the aggregational task. Something like calculation +# of means, deviations, fitting data, tasks where the whole data has to be looked at, and not just +# one instance of circuit + other information. +def get_aggregational_data(experiment: Experiment) -> pd.DataFrame: + """Computes aggregational tasks, fits data and stores the results in a data frame. + + No data is manipulated in the ``experiment`` object. + + Args: + experiment (Experiment): After sequential postprocessing of the experiment data. + + Returns: + pd.DataFrame: The summarized data. + """ + # Has to fit the column description from ``filter_sign``. + depths, ydata = experiment.extract("filter", "depth", "mean") + _, ydata_std = experiment.extract("filter", "depth", "std") + # Fit the filtered signal for each depth, there could be two overlaying exponential functions. + popt, perr = fitting_methods.fit_exp2_func(depths, ydata) + # Build a list of dictionaries with the aggregational information. + data = [ + { + "depth": depths, # The x-axis. + "data": ydata, # The filtered signal. + "2sigma": 2 * ydata_std, # The 2 * standard deviation error for each depth. + "fit_func": "exp2_func", # Which function was used to fit. + "popt": { + "A1": popt[0], + "A2": popt[1], + "p1": popt[2], + "p2": popt[3], + }, # The fitting parameters. + "perr": { + "A1_err": perr[0], + "A2_err": perr[1], + "p1_err": perr[2], + "p2_err": perr[3], + }, # The estimated errors. + } + ] + df = pd.DataFrame(data, index=["filter"]) + return df + + +# This is highly individual. The only important thing for the qq module is that a plotly figure is +# returned, if qq is not used any type of figure can be built. 
+def build_report(experiment: Experiment, df_aggr: pd.DataFrame) -> Figure: + """Use data and information from ``experiment`` and the aggregated data data frame to + build a report as plotly figure. + + Args: + experiment (Experiment): After sequential postprocessing of the experiment data. + df_aggr (pd.DataFrame): Normally build with ``get_aggregational_data`` function. + + Returns: + (Figure): A plotly.graphical_object.Figure object. + """ + + # Initiate a report object. + report = moduleReport() + # Add general information to the object. + report.info_dict["Number of qubits"] = len(experiment.data[0]["samples"][0]) + report.info_dict["Number of shots"] = len(experiment.data[0]["samples"]) + report.info_dict["runs"] = experiment.extract("samples", "depth", "count")[1][0] + report.info_dict["Fitting daviations"] = "".join( + [ + "{}:{:.3f} ".format(key, df_aggr.loc["filter"]["perr"][key]) + for key in df_aggr.loc["filter"]["perr"] + ] + ) + # Use the predefined ``scatter_fit_fig`` function from ``basics.utils`` to build the wanted + # plotly figure with the scattered filtered data along with the mean for + # each depth and the exponential fit for the means. + report.all_figures.append(scatter_fit_fig(experiment, df_aggr, "depth", "filter")) + # Return the figure the report object builds out of all figures added to the report. 
+ return report.build() diff --git a/src/qibocal/calibrations/protocols/__init__.py b/src/qibocal/calibrations/niGSC/__init__.py similarity index 100% rename from src/qibocal/calibrations/protocols/__init__.py rename to src/qibocal/calibrations/niGSC/__init__.py diff --git a/src/qibocal/calibrations/niGSC/basics/__init__.py b/src/qibocal/calibrations/niGSC/basics/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/qibocal/calibrations/niGSC/basics/circuitfactory.py b/src/qibocal/calibrations/niGSC/basics/circuitfactory.py new file mode 100644 index 000000000..428940219 --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/circuitfactory.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +import abc + +import numpy as np +from qibo import gates +from qibo.models import Circuit +from qibo.quantum_info.random_ensembles import random_clifford + +from qibocal.calibrations.niGSC.basics.utils import ONEQ_GATES +from qibocal.config import raise_error + + +class CircuitFactory: + """Iterator object, when called a random circuit with wanted gate + distribution is created. + """ + + def __init__( + self, nqubits: int, depths: list | np.ndarray | int, qubits: list = [] + ) -> None: + self.nqubits = nqubits if nqubits is not None else len(qubits) + self.qubits = qubits if qubits else list(range(nqubits)) + if isinstance(depths, int): + depths = [depths] + self.depths = depths + self.name = "Abstract" + + def __len__(self): + return len(self.depths) + + def __iter__(self) -> CircuitFactory: + self.n = 0 + return self + + def __next__(self) -> Circuit: + """Build a ``Circuit`` object with wanted gate distribution. + + Embeds the circuit if the number of qubits ``self.nqubits`` is bigger than + the amount of qubits indicated in ``self.qubits``. + + Raises: + StopIteration: When the generator comes to an end. + + Returns: + Circuit: With specific gate distribution. + """ + + # Check if the stop critarion is met. 
+ if self.n >= len(self.depths): + raise StopIteration + else: + circuit = self.build_circuit(self.depths[self.n]) + self.n += 1 + # Distribute the circuit onto the given support. + bigcircuit = Circuit(self.nqubits) + bigcircuit.add(circuit.on_qubits(*self.qubits)) + return bigcircuit + + def build_circuit(self, depth: int) -> Circuit: + """Initiate a ``qibo.models.Circuit`` object and fill it with the wanted gates. + + Which gates are wanted is encoded in ``self.gates_layer()``. + Add a measurement gate for every qubit. + + Args: + depth (int): How many layers there are in the circuit. + + Returns: + Circuit: the circuit with ``depth`` many layers. + """ + # Initiate the ``Circuit`` object with the amount of active qubits. + circuit = Circuit(len(self.qubits)) + # Go throught the depth/layers of the circuit and add gate layers. + for _ in range(depth): + circuit.add(self.gate_layer()) + # Add a ``Measurement`` gate for every qubit. + circuit.add(gates.M(*range(len(self.qubits)))) + return circuit + + @abc.abstractmethod + def gate_layer(self): + """This method has to be overwritten by the inheriting child class.""" + raise_error(NotImplementedError) + + +class Qibo1qGatesFactory(CircuitFactory): + """When called creates a random circuit build out of 1-qubit non-parameterized + qibo gates. + """ + + def __init__(self, nqubits: int, depths: list, qubits: list = []) -> None: + super().__init__(nqubits, depths, qubits) + self.name = "Qibo1qGates" + + def gate_layer(self): + """Build a circuit out of random 1-qubit qibo gates. + + Returns: + (list) filled with random 1 qubit qibo gates + """ + gates_list = [] + # Draw the random indices for the list where the names of the 1-qubit + # non-parameterized gates are stored. + for count, rint in enumerate( + np.random.randint(0, len(ONEQ_GATES), size=len(self.qubits)) + ): + # Load the random gate. + rand_gate = getattr(gates, ONEQ_GATES[rint]) + # Append the random gate initialized with the qubit is should act on. 
+ gates_list.append(rand_gate(count)) + return gates_list + + +class SingleCliffordsFactory(CircuitFactory): + """Creates circuits filled with random single qubit Clifford gates for + each active qubit. + """ + + def __init__(self, nqubits: int, depths: list, qubits: list = []) -> None: + super().__init__(nqubits, depths, qubits) + self.name = "SingleCliffords" + + def gate_layer(self) -> list: + """Use the ``qibo.quantum_info`` module to draw as many random clifford + unitaries as there are (active) qubits, make unitary gates with them. + + Returns: + (list) filled with ``qibo.gates.Unitary``: the simulatanous 1q-Clifford gates. + """ + + gates_list = [] + # Make sure the shape is suitable for iterating over the Clifford matrices returned + # by the ``random_clifford`` function. + random_cliffords = random_clifford(self.qubits).reshape(len(self.qubits), 2, 2) + # Make gates out of the unitary matrices. + for count, rand_cliff in enumerate(random_cliffords): + # Build the gate with the random Clifford matrix, let is act on the right qubit. + gates_list.append(gates.Unitary(rand_cliff, count)) + return gates_list diff --git a/src/qibocal/calibrations/niGSC/basics/experiment.py b/src/qibocal/calibrations/niGSC/basics/experiment.py new file mode 100644 index 000000000..6bf02994e --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/experiment.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +import pickle +from collections.abc import Iterable +from os.path import isfile +from typing import Callable + +import numpy as np +import pandas as pd +from qibo.models import Circuit +from qibo.noise import NoiseModel + +from qibocal.calibrations.niGSC.basics.utils import copy_circuit, experiment_directory +from qibocal.config import raise_error + + +class Experiment: + """Experiment objects which holds an iterable circuit factory along with + a simple data structure associated to each circuit. 
+ + Args: + circuitfactory (Iterable): Gives a certain amount of circuits when + iterated over. + nshots (int): For execution of circuit, indicates how many shots. + data (list): If filled, ``data`` can be used to specifying parameters + while executing a circuit or deciding how to process results. + It is used to store all relevant data. + """ + + def __init__( + self, + circuitfactory: Iterable | None, + data: Iterable | None = None, + nshots: int | None = 128, + noise_model: NoiseModel | None = None, + ) -> None: + """ """ + + if circuitfactory is not None and not isinstance(circuitfactory, Iterable): + raise_error( + TypeError, + "given circuit factory has wrong type {}, must be Iterable | None.".format( + type(circuitfactory) + ), + ) + self.circuitfactory = circuitfactory + if data is not None and not isinstance(data, Iterable): + raise_error( + TypeError, + "given data has wrong type {}, must be Iterable | None ".format( + type(data) + ), + ) + self.data = data + if nshots is not None and not isinstance(nshots, int): + raise_error( + TypeError, + "given nshots has wrong type {}, must be int | None".format( + type(nshots) + ), + ) + self.nshots = nshots + if noise_model is not None and not isinstance(noise_model, NoiseModel): + raise_error( + TypeError, + "given circuit factory has wrong type {}, must be qibo NoiseModel | None .".format( + type(noise_model) + ), + ) + self.__noise_model = noise_model + self.name = "Abstract" + + @property + def noise_model(self): + return self.__noise_model + + @property + def dataframe(self) -> pd.DataFrame: + return pd.DataFrame(self.data) + + @classmethod + def load(cls, path: str) -> Experiment: + """Creates an experiment object with data and if possible with circuits. + + Args: + path (str): The directory from where the object should be restored. + + Returns: + Experiment: The object with data (and circuitfactory). 
+ """ + datapath = f"{path}experiment_data.pkl" + circuitspath = f"{path}circuits.pkl" + if isfile(datapath): + with open(datapath, "rb") as f: + data = pickle.load(f) + if isinstance(data, pd.DataFrame): # pragma: no cover + data = data.to_dict("records") + nshots = len(data[0]["samples"]) + else: + data, nshots = None, None + if isfile(circuitspath): + with open(circuitspath, "rb") as f: + circuitfactory = pickle.load(f) + else: + circuitfactory = None + # Initiate an instance of the experiment class. + obj = cls(circuitfactory, data=data, nshots=nshots) + return obj + + def save(self, path: str | None = None) -> str: + """Creates a path if None given and pickles relevant data from ``self.data`` + and if ``self.circuitfactory`` is a list that one too. + + Returns: + (str): The path of stored experiment. + """ + + # Check if path to store is given, if not create one. If yes check if the last character + # is a /, if not add it. + if path is None: + self.path = experiment_directory("rb") + else: + self.path = path if path[-1] == "/" else f"{path}/" + # Only if the circuit factory is a list it will be stored. + if isinstance(self.circuitfactory, list): + with open(f"{self.path}circuits.pkl", "wb") as f: + pickle.dump(self.circuitfactory, f) + # And only if data is not None the data list (full of dicionaries) will be + # stored. + if self.data is not None: + with open(f"{self.path}experiment_data.pkl", "wb") as f: + pickle.dump(self.data, f) + # It is convenient to know the path after storing, so return it. + return self.path + + def extract( + self, output_key: str, groupby_key: str = "", agg_type: str | Callable = "" + ) -> np.ndarray | tuple[np.ndarray, np.ndarray]: + """Return wanted values from ``self.data`` via the dataframe property. + + If ``groupby_key`` given, aggregate the dataframe, extract the data by which the frame was + grouped, what was calculated given the ``agg_type`` parameter. 
Two arrays are returned then, + the group values and the grouped (aggregated) data. If no ``agg_type`` given use a linear function. + If ``groupby_key`` not given, only return the extracted data from given key. + + Args: + output_key (str): Key name of the wanted output. + groupby_key (str): If given, group with that key name. + agg_type (str): If given, calcuted aggregation function on groups. + + Returns: + Either one or two np.ndarrays. If no grouping wanted, just the data. If grouping + wanted, the values after which where grouped and the grouped data. + """ + + # Check what parameters where given. + if not groupby_key and not agg_type: + # No grouping and no aggreagtion is wanted. Just return the wanted output key. + return np.array(self.dataframe[output_key].tolist()) + elif not groupby_key and agg_type: + # No grouping wanted, just an aggregational task on all the data. + return self.dataframe[output_key].apply(agg_type) + elif groupby_key and not agg_type: + # Grouping is wanted but no aggregation, use a linear function. + df = self.dataframe.get([output_key, groupby_key]) + grouped_df = df.groupby(groupby_key, group_keys=True).apply(lambda x: x) + return grouped_df[groupby_key].to_numpy(), grouped_df[output_key].to_numpy() + else: + df = self.dataframe.get([output_key, groupby_key]) + grouped_df = df.groupby(groupby_key, group_keys=True).apply(agg_type) + return grouped_df.index.to_numpy(), grouped_df[output_key].to_numpy() + + def prebuild(self) -> None: + """Converts the attribute ``circuitfactory`` which is in general + an iterable into a list. + """ + self.circuitfactory = list(self.circuitfactory) + + def perform(self, sequential_task: Callable[[Circuit, dict], dict]) -> None: + """Takes a given function, checks the status of attribute ``circuitfactory`` + and ``data`` and executes the sequential function row by row altering the + ``self.data`` attribute. 
+ + Either ``self.circuitfactory`` or ``self.data`` cannot be ``None`` and + if not ``None`` they have to have the right length. + + Args: + sequential_task (callable[[Circuit, dict], dict]): A function applied + row by row alterting each datarow. + """ + # Either the circuit factory or the data rows can be empty. + # If ``self.data`` is not empty the actual list element is altered without + # storing it after alternation. + # Both ``circuit`` and ``datarow`` can be provided: + if self.circuitfactory is not None and self.data is not None: + for circuit, datarow in zip(self.circuitfactory, self.data): + datarow = sequential_task( + copy_circuit(circuit.copy(deep=True)), datarow + ) + # Only``datarow`` can be provided: + elif self.circuitfactory is None and self.data is not None: + for datarow in self.data: + datarow = sequential_task(None, datarow) + # Only ``circuit`` can be provided: + elif self.circuitfactory is not None and self.data is None: + newdata = [] + for circuit in self.circuitfactory: + newdata.append( + sequential_task(copy_circuit(circuit.copy(deep=True)), {}) + ) + self.data = newdata + else: + raise_error(ValueError, "Both attributes circuitfactory and data are None.") + + def execute(self, circuit: Circuit, datarow: dict) -> dict: + """Executes a circuit, returns the single shot results in a dict. + + Args: + circuit (Circuit): Will be executed, has to return samples. + datarow (dict): Dictionary with parameters for execution and immediate + postprocessing information. 
+ """ + + if self.noise_model is not None: + circuit = self.noise_model.apply(circuit) + samples = circuit(nshots=self.nshots).samples() + return {"samples": samples} diff --git a/src/qibocal/calibrations/niGSC/basics/fitting.py b/src/qibocal/calibrations/niGSC/basics/fitting.py new file mode 100644 index 000000000..67febc00c --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/fitting.py @@ -0,0 +1,176 @@ +"""In this python script the fitting methods for the gate set protocols are defined. +They consist mostly of exponential decay fitting. +""" + +from typing import Optional, Tuple, Union + +import numpy as np +from scipy.linalg import hankel, svd +from scipy.optimize import curve_fit + +from qibocal.config import raise_error + + +def exp1_func(x: np.ndarray, A: float, f: float) -> np.ndarray: + """Return :math:`A\\cdot f^x` where ``x`` is an ``np.ndarray`` and + ``A``, ``f`` are floats + """ + return A * f**x + + +def exp1B_func(x: np.ndarray, A: float, f: float, B: float) -> np.ndarray: + """Return :math:`A\\cdot f^x+B` where ``x`` is an ``np.ndarray`` and + ``A``, ``f``, ``B`` are floats + """ + return A * f**x + B + + +def exp2_func(x: np.ndarray, A1: float, A2: float, f1: float, f2: float) -> np.ndarray: + """Return :math:`A_1\\cdot f_1^x+A_2\\cdot f_2^x` where ``x`` is an ``np.ndarray`` and + ``A1``, ``f1``, ``A2``, ``f2`` are floats. There is no linear offsett B. + """ + return A1 * f1**x + A2 * f2**x + + +def esprit( + xdata: np.ndarray, + ydata: np.ndarray, + num_decays: int, + hankel_dim: Optional[int] = None, +) -> np.ndarray: + """Implements the ESPRIT algorithm for peak detection. + + Args: + xdata (np.ndarray): Labels of data. Has to be equally spaced. + ydata (np.ndarray): The data where multiple decays are fitted in. + num_decays (int): How many decays should be fitted. + hankel_dim (int | None, optional): The Hankel dimension. Defaults to None. + + Returns: + np.ndarray: The decay parameters. 
+ + Raises: + ValueError: When the x-labels are not equally spaced the algorithm does not work. + + """ + # Check for equally spacing. + if not np.all(xdata[1:] - xdata[:-1] == xdata[1] - xdata[0]): + raise_error(ValueError, "xdata has to be equally spaced.") + sampleRate = 1 / (xdata[1] - xdata[0]) + # xdata has to be an array. + xdata = np.array(xdata) + # Define the Hankel dimension if not given. + if hankel_dim is None: + hankel_dim = int(np.round(0.5 * xdata.size)) + # Fine tune the dimension of the hankel matrix such that the mulitplication + # processes don't break. + hankel_dim = max(num_decays + 1, hankel_dim) + hankel_dim = min(hankel_dim, xdata.size - num_decays + 1) + hankelMatrix = hankel(ydata[:hankel_dim], ydata[(hankel_dim - 1) :]) + # Calculate nontrivial (nonzero) singular vectors of the hankel matrix. + U, _, _ = svd(hankelMatrix, full_matrices=False) + # Cut off the columns to the amount which is needed. + U_signal = U[:, :num_decays] + # Calculte the solution. + spectralMatrix = np.linalg.pinv(U_signal[:-1,]) @ U_signal[1:,] + # Calculate the poles/eigenvectors and space them right. Return them. + return np.linalg.eigvals(spectralMatrix) * sampleRate + + +def fit_exp1B_func( + xdata: Union[np.ndarray, list], ydata: Union[np.ndarray, list], **kwargs +) -> Tuple[tuple, tuple]: + """Calculate an single exponential A*p^m+B fit to the given ydata. + + Args: + xdata (Union[np.ndarray, list]): The x-labels. + ydata (Union[np.ndarray, list]): The data to be fitted. + + Returns: + Tuple[tuple, tuple]: The fitting parameters (A, p, B) and the estimated error + (A_err, p_err, B_err) + """ + + # Check if all the values in ``ydata``are the same. That would make the + # exponential fit unnecessary. + if np.all(ydata == ydata[0]): + popt, perr = (ydata[0], 1.0, 0), (0, 0, 0) + else: + # Get a guess for the exponential function. 
+ guess = kwargs.get("p0", [0.5, 0.9, 0.8]) + # If the search for fitting parameters does not work just return + # fixed parameters where one can see that the fit did not work + try: + popt, pcov = curve_fit( + exp1B_func, + xdata, + ydata, + p0=guess, + method="lm", + ) + perr = tuple(np.sqrt(np.diag(pcov))) + except: + popt, perr = (0, 0, 0), (0, 0, 0) + return popt, perr + + +def fit_exp1_func( + xdata: Union[np.ndarray, list], ydata: Union[np.ndarray, list], **kwargs +) -> Tuple[tuple, tuple]: + """Calculate an single exponential A*p^m fit to the given ydata, no linear offset. + + Args: + xdata (Union[np.ndarray, list]): The x-labels. + ydata (Union[np.ndarray, list]): The data to be fitted. + + Returns: + Tuple[tuple, tuple]: The fitting parameters (A, p) and the estimated error (A_err, p_err). + """ + + # Check if all the values in ``ydata``are the same. That would make the + # exponential fit unnecessary. + if np.all(ydata == ydata[0]): + popt, perr = (ydata[0], 1.0), (0, 0) + else: + # Get a guess for the exponential function. + guess = kwargs.get("p0", [0.5, 0.9]) + # If the search for fitting parameters does not work just return + # fixed parameters where one can see that the fit did not work + try: + # Build a new function such that the linear offset is zero. + popt, pcov = curve_fit( + exp1_func, + xdata, + ydata, + p0=guess, + method="lm", + ) + perr = tuple(np.sqrt(np.diag(pcov))) + except: + popt, perr = (0, 0), (0, 0) + + return popt, perr + + +def fit_exp2_func( + xdata: Union[np.ndarray, list], ydata: Union[np.ndarray, list], **kwargs +) -> Tuple[tuple, tuple]: + """Calculate 2 exponentials on top of each other, fit to the given ydata. + + No linear offset, the ESPRIT algorithm is used to identify the two exponential decays. + + Args: + xdata (Union[np.ndarray, list]): The x-labels. + ydata (Union[np.ndarray, list]): The data to be fitted + + Returns: + Tuple[tuple, tuple]: (A1, A2, f1, f2) with f* the decay parameters. 
+ """ + + # TODO how are the errors estimated? + # TODO the data has to have a sufficiently big size, check that. + decays = esprit(np.array(xdata), np.array(ydata), 2) + vandermonde = np.vander(decays, N=xdata[-1] + 1, increasing=True) + vandermonde = np.take(vandermonde, xdata, axis=1) + alphas = np.linalg.pinv(vandermonde.T) @ np.array(ydata).reshape(-1, 1).flatten() + return tuple([*alphas, *decays]), (0, 0, 0, 0) diff --git a/src/qibocal/calibrations/niGSC/basics/noisemodels.py b/src/qibocal/calibrations/niGSC/basics/noisemodels.py new file mode 100644 index 000000000..599d04466 --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/noisemodels.py @@ -0,0 +1,54 @@ +""" Costum error models are build here for making it possible to pass +strings describing the error model via runcards in qibocal. +They inherit from the qibo noise NoiseModel module and are prebuild. +""" + +import numpy as np +from qibo import gates +from qibo.noise import NoiseModel, PauliError + +from qibocal.config import raise_error + + +class PauliErrorOnUnitary(NoiseModel): + """Builds a noise model with pauli flips acting on unitaries. + + If no initial parameters for px, py, pz are given, random values + are drawn (in sum not bigger than 1). + """ + + def __init__(self, *args) -> None: + super().__init__() + # Check if number of arguments is 0 or 1 and if it's equal to None + if len(args) == 0 or (len(args) == 1 and args[0] is None): + # Assign random values to params. + params = np.random.uniform(0, 0.25, size=3) + elif len(args) == 3: + params = args + else: + # Raise ValueError if given paramters are wrong. + raise_error( + ValueError, + "Wrong number of error parameters, 3 != {}.".format(len(args)), + ) + self.build(*params) + + def build(self, *params): + # Add PauliError to gates.Unitary + self.add(PauliError(*params), gates.Unitary) + + +class PauliErrorOnX(PauliErrorOnUnitary): + """Builds a noise model with pauli flips acting on X gates. 
+ + Inherited from ``PauliErrorOnUnitary`` but the ``build`` method is + overwritten to act on X gates. + If no initial parameters for px, py, pz are given, random values + are drawn (in sum not bigger than 1). + """ + + def __init__(self, *args) -> None: + super().__init__(*args) + + def build(self, *params): + self.add(PauliError(*params), gates.X) diff --git a/src/qibocal/calibrations/niGSC/basics/plot.py b/src/qibocal/calibrations/niGSC/basics/plot.py new file mode 100644 index 000000000..7144a0283 --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/plot.py @@ -0,0 +1,131 @@ +import numpy as np +import pandas as pd +import plotly.graph_objects as go +from plotly.subplots import make_subplots + +import qibocal.calibrations.niGSC.basics.fitting as fitting_methods +from qibocal.calibrations.niGSC.basics.experiment import Experiment + + +def plot_qq(folder: str, routine: str, qubit, format): + """Load the module for which the plot has to be done. + + + Args: + folder (str): The folder path where the data was stored. + routine (str): The routine name, here the module name. + qubit (Any): Is not used here + format (Any): Is not used here. + + Returns: + plotly figure: plotly graphical object figure. + """ + + import importlib + + # Load the module, something like 'standardrb'. + module = importlib.import_module(f"qibocal.calibrations.niGSC.{routine}") + # Load the experiment with the class method ``load``. + experiment = module.ModuleExperiment.load(f"{folder}/data/{routine}/") + # In this data frame the precomputed fitting parameters and other + # parameters for fitting and plotting are stored. + aggr_df = pd.read_pickle(f"{folder}/data/{routine}/fit_plot.pkl") + # Build the figure/report using the responsible module. 
+ plotly_figure = module.build_report(experiment, aggr_df) + return [plotly_figure] + + +class Report: + """Once initialized with the correct parameters an Report object can build + reports to display results of a gate set characterization experiment. + """ + + def __init__(self) -> None: + self.all_figures = [] + self.title = "Report" + self.info_dict = {} + + def build(self): + l = len(self.all_figures) + subplot_titles = [figdict.get("subplot_title") for figdict in self.all_figures] + fig = make_subplots( + rows=int(l / 2) + l % 2 + 1, + cols=1 if l == 1 else 2, + subplot_titles=subplot_titles, + ) + for count, fig_dict in enumerate(self.all_figures): + plot_list = fig_dict["figs"] + for plot in plot_list: + fig.add_trace(plot, row=count // 2 + 1, col=count % 2 + 1) + + fig.add_annotation( + dict( + bordercolor="black", + font=dict(color="black", size=16), + x=0.0, + y=1.0 / (int(l / 2) + l % 2 + 1) - len(self.info_dict) * 0.005, + showarrow=False, + text="
".join( + [f"{key} : {value}\n" for key, value in self.info_dict.items()] + ), + align="left", + textangle=0, + yanchor="top", + xref="paper", + yref="paper", + ) + ) + fig.update_xaxes(title_font_size=18, tickfont_size=16) + fig.update_yaxes(title_font_size=18, tickfont_size=16) + fig.update_layout( + font_family="Averta", + hoverlabel_font_family="Averta", + title_text=self.title, + title_font_size=24, + legend_font_size=16, + hoverlabel_font_size=16, + showlegend=True, + height=500 * (int(l / 2) + l % 2) if l > 2 else 1000, + width=1000, + ) + + return fig + + +def scatter_fit_fig( + experiment: Experiment, df_aggr: pd.DataFrame, xlabel: str, index: str +): + fig_traces = [] + dfrow = df_aggr.loc[index] + fig_traces.append( + go.Scatter( + x=experiment.dataframe[xlabel], + y=experiment.dataframe[index], + line=dict(color="#6597aa"), + mode="markers", + marker={"opacity": 0.2, "symbol": "square"}, + name="runs", + ) + ) + fig_traces.append( + go.Scatter( + x=dfrow[xlabel], + y=dfrow["data"], + line=dict(color="#aa6464"), + mode="markers", + name="average", + ) + ) + x_fit = np.linspace(min(dfrow[xlabel]), max(dfrow[xlabel]), len(dfrow[xlabel]) * 20) + y_fit = getattr(fitting_methods, dfrow["fit_func"])(x_fit, *dfrow["popt"].values()) + fig_traces.append( + go.Scatter( + x=x_fit, + y=y_fit, + name="".join( + ["{}:{:.3f} ".format(key, dfrow["popt"][key]) for key in dfrow["popt"]] + ), + line=go.scatter.Line(dash="dot"), + ) + ) + return {"figs": fig_traces, "xlabel": xlabel, "ylabel": index} diff --git a/src/qibocal/calibrations/niGSC/basics/utils.py b/src/qibocal/calibrations/niGSC/basics/utils.py new file mode 100644 index 000000000..d4f6a075b --- /dev/null +++ b/src/qibocal/calibrations/niGSC/basics/utils.py @@ -0,0 +1,110 @@ +from copy import deepcopy +from os import mkdir +from os.path import isdir +from typing import Union + +import numpy as np +from qibo.models import Circuit + +# Gates, without having to define any paramters +ONEQ_GATES = ["I", "X", "Y", 
"Z", "H", "S", "SDG", "T", "TDG"] + + +def experiment_directory(name: str): + """Make the directory where the experiment will be stored.""" + from datetime import datetime + + overall_dir = "experiments/" + # Check if the overall directory exists. If not create it. + if not isdir(overall_dir): + mkdir(overall_dir) + # Get the current date and time. + dt_string = datetime.now().strftime("%y%b%d_%H%M%S") + # Every script name ``name`` gets its own directory. + subdirectory = f"{overall_dir}{name}/" + if not isdir(subdirectory): # pragma: no cover + mkdir(subdirectory) + # Name the final directory for this experiment. + final_directory = f"{subdirectory}experiment{dt_string}/" + if not isdir(final_directory): # pragma: no cover + mkdir(final_directory) + else: + already_file, count = True, 1 + while already_file: + final_directory = f"{subdirectory}experiment{dt_string}_{count}/" + if not isdir(final_directory): + mkdir(final_directory) + already_file = False + else: + count += 1 + return final_directory + + +def effective_depol(error_channel, **kwargs): + """ """ + liouvillerep = error_channel.to_pauli_liouville(normalize=True) + d = int(np.sqrt(len(liouvillerep))) + depolp = (np.trace(liouvillerep) - 1) / (d**2 - 1) + return depolp + + +def probabilities(allsamples: Union[list, np.ndarray]) -> np.ndarray: + """Takes the given list/array (3-dimensional) of samples and returns probabilities + for each possible state to occure. + + The states for 4 qubits are order as follows: + [(0, 0, 0, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 1, 1), (0, 1, 0, 0), + (0, 1, 0, 1), (0, 1, 1, 0), (0, 1, 1, 1), (1, 0, 0, 0), (1, 0, 0, 1), + (1, 0, 1, 0), (1, 0, 1, 1), (1, 1, 0, 0), (1, 1, 0, 1), (1, 1, 1, 0), (1, 1, 1, 1)] + + Args: + allsamples (Union[list, np.ndarray]): The single shot samples, 3-dimensional. + + Returns: + np.ndarray: Probability array of 2 dimension. + """ + + from itertools import product + + # Make it an array to use the shape property. 
+ allsamples = np.array(allsamples) + # The array has to have three dimension. + if len(allsamples.shape) == 2: + allsamples = allsamples[None, ...] + nqubits, nshots = len(allsamples[0][0]), len(allsamples[0]) + # Create all possible state vectors. + allstates = list(product([0, 1], repeat=nqubits)) + # Iterate over all the samples and count the different states. + probs = [ + [np.sum(np.product(samples == state, axis=1)) for state in allstates] + for samples in allsamples + ] + probs = np.array(probs) / (nshots) + return probs + + +def copy_circuit(circuit: Circuit): + newcircuit = Circuit(circuit.nqubits) + for gate in circuit.queue: + newcircuit.add(deepcopy(gate)) + return newcircuit + + +def gate_fidelity(eff_depol: float, primitive=False) -> float: + """Returns the average gate fidelity given the effective depolarizing parameter for single qubits. + + If primitive is True, divide by additional 1.875 as convetion in RB reporting. + (The original reasoning was that Clifford gates are typically + compiled with an average number of 1.875 Pi half pulses.) + + Args: + eff_depol (float): The effective depolarizing parameter. + primitive (bool, optional): If True, additionally divide by 1.875. 
def gate_fidelity(eff_depol: float, primitive=False) -> float:
    """Convert an effective depolarizing parameter into an average gate
    fidelity for single qubits.

    If ``primitive`` is True, divide by an additional 1.875 as convention
    in RB reporting. (The original reasoning was that Clifford gates are
    typically compiled with an average number of 1.875 Pi half pulses.)

    Args:
        eff_depol (float): The effective depolarizing parameter.
        primitive (bool, optional): If True, additionally divide by 1.875.

    Returns:
        float: Average gate fidelity.
    """
    # Average infidelity of a depolarizing channel with local dimension 2.
    avg_infidelity = (1 - eff_depol) / 2
    if primitive:
        # Rescale per primitive pulse following the RB reporting convention.
        avg_infidelity /= 1.875
    return 1 - avg_infidelity
That way they will be stored when + # calling the save method and the circuits are not lost once executed. + self.prebuild() + self.name = "CorrelatedRB" + + def execute(self, circuit: Circuit, datarow: dict) -> dict: + """Overwrited parents method. Executes a circuit, returns the single shot results and depth. + + Args: + circuit (Circuit): Will be executed, has to return samples. + datarow (dict): Dictionary with parameters for execution and + immediate postprocessing information. + """ + + datadict = super().execute(circuit, datarow) + # Measurement gate should not contribute to depth, therefore -1. + # Take the amount of qubits into account. + datadict["depth"] = int((circuit.ngates - 1) / len(datadict["samples"][0])) + return datadict + + +class moduleReport(Report): + def __init__(self) -> None: + super().__init__() + self.title = "Correlated Filtered Randomized Benchmarking" + + +def filter_function(circuit: Circuit, datarow: dict) -> dict: + """Calculates the filtered signal for every crosstalk irrep. + + Every irrep has a projector charactarized with a bit string + :math:`\\boldsymbol{\\lambda}\\in\\mathbb{F}_2^N` where :math:`N` is the + number of qubits. + The experimental outcome for each qubit is denoted as + :math:`\\ket{i_k}` with :math:`i_k=0, 1` with :math:`d=2`. + + .. math:: + f_{\\boldsymbol{\\lambda}}(i,g) + = \\frac{1}{2^{N-|\\boldsymbol{\\lambda}|}} + \\sum_{\\mathbf b\\in\\mathbb F_2^N} + (-1)^{|\\boldsymbol{\\lambda}\\wedge\\mathbf b|} + \\frac{1}{d^N}\\left(\\prod_{k=1}^N(d|\\bra{i_k} U_{g_{(k)}} + \\ket{0}|^2)^{\\lambda_k-\\lambda_kb_k}\\right) + + Args: + circuit (Circuit): The circuit used to produce the samples in ``datarow``. + datarow (dict): Dictionary with samples produced by given ``circuit``. + + Returns: + datarow (dict): Filtered signals are stored additionally. + """ + + # Extract amount of used qubits and used shots. + nshots, nqubits = datarow["samples"].shape + # For qubits the local dimension is 2. 
+ d = 2 + # Fuse the gates for each qubit. + fused_circuit = circuit.fuse(max_qubits=1) + # Extract for each qubit the ideal state. + # If depth = 0 there is only a measurement circuit and it does + # not have an implemented matrix. Set the ideal states to ground states. + if circuit.depth == 1: + ideal_states = np.tile(np.array([1, 0]), nqubits).reshape(nqubits, 2) + else: + ideal_states = np.array( + [fused_circuit.queue[k].matrix[:, 0] for k in range(nqubits)] + ) + # Go through every irrep. + f_list = [] + for l in np.array(list(product([False, True], repeat=nqubits))): + # Check if the trivial irrep is calculated + if not sum(l): + # In the end every value will be divided by ``nshots``. + a = nshots + else: + # Get the supported ideal outcomes and samples + # for this irreps projector. + suppl = ideal_states[l] + suppsamples = datarow["samples"][:, l] + a = 0 + # Go through all ``nshots`` samples + for s in suppsamples: + # Go through all combinations of (0,1) on the support + # of lambda ``l``. + for b in np.array(list(product([False, True], repeat=sum(l)))): + # Calculate the sign depending on how many times the + # nontrivial projector was used. + # Take the product of all probabilities chosen by the + # experimental outcome which are supported by the + # inverse of b. + a += (-1) ** sum(b) * np.prod( + d * np.abs(suppl[~b][np.eye(2, dtype=bool)[s[~b]]]) ** 2 + ) + # Normalize with inverse of effective measuremetn. + f_list.append(a * (d + 1) ** sum(l) / d**nqubits) + for kk in range(len(f_list)): + datarow[f"irrep{kk}"] = f_list[kk] / nshots + return datarow + + +def post_processing_sequential(experiment: Experiment): + """Perform sequential tasks needed to analyze the experiment results. + + The data is added/changed in the experiment, nothign has to be returned. + + Args: + experiment (Experiment): Experiment object after execution of the experiment itself. + """ + + # Compute and add the ground state probabilities row by row. 
def get_aggregational_data(experiment: Experiment) -> pd.DataFrame:
    """Computes aggregational tasks, fits data and stores the results in a data frame.

    No data is manipulated in the ``experiment`` object.

    Args:
        experiment (Experiment): After sequential postprocessing of the experiment data.

    Returns:
        pd.DataFrame: The summarized data.
    """

    # One irrep per bit string over the qubits -> 2 ** nqubits signals,
    # which also fixes the amount of plots in the report.
    nqubits = len(experiment.data[0]["samples"][0])
    rows, row_names = [], []
    for irrep_index in range(2**nqubits):
        # Label must match the one chosen in ``filter_function``.
        label = f"irrep{irrep_index}"
        depths, means = experiment.extract(label, "depth", "mean")
        _, stds = experiment.extract(label, "depth", "std")
        # Fit an exponential without linear offset.
        popt, perr = fitting_methods.fit_exp1_func(depths, means)
        rows.append(
            {
                "depth": depths,  # The x-axis.
                "data": means,  # Mean filtered signal per depth.
                "2sigma": 2 * stds,  # Twice the standard deviation per depth.
                "fit_func": "exp1_func",  # Which function was used to fit.
                "popt": {"A": popt[0], "p": popt[1]},  # The fitting parameters.
                "perr": {"A_err": perr[0], "p_err": perr[1]},  # The estimated errors.
            }
        )
        # The label becomes the row name of the aggregated frame.
        row_names.append(label)
    return pd.DataFrame(rows, index=row_names)
+ + Returns: + (Figure): A plotly.graphical_object.Figure object. + """ + + # Initiate a report object. + report = moduleReport() + # Add general information to the object. + report.info_dict["Number of qubits"] = len(experiment.data[0]["samples"][0]) + report.info_dict["Number of shots"] = len(experiment.data[0]["samples"]) + report.info_dict["runs"] = experiment.extract("samples", "depth", "count")[1][0] + lambdas = iter(product([0, 1], repeat=int(report.info_dict["Number of qubits"]))) + for kk, l in enumerate(lambdas): + # Add the fitting errors which will be displayed in a box under the plots. + report.info_dict[f"Fitting daviations irrep {l}"] = "".join( + [ + "{}:{:.3f} ".format(key, df_aggr.loc[f"irrep{kk}"]["perr"][key]) + for key in df_aggr.loc[f"irrep{kk}"]["perr"] + ] + ) + # Use the predefined ``scatter_fit_fig`` function from ``basics.utils`` to build the wanted + # plotly figure with the scattered filter function points and then mean per depth. + figdict = scatter_fit_fig(experiment, df_aggr, "depth", f"irrep{kk}") + # Add a subplot title for each irrep. + figdict["subplot_title"] = f"Irrep {l}" + report.all_figures.append(figdict) + return report.build() diff --git a/src/qibocal/calibrations/niGSC/standardrb.py b/src/qibocal/calibrations/niGSC/standardrb.py new file mode 100644 index 000000000..2af13af04 --- /dev/null +++ b/src/qibocal/calibrations/niGSC/standardrb.py @@ -0,0 +1,221 @@ +""" Here the standard randomized benchmarking is implemented using the +niGSC (non-interactive gate set characterization) architecture. 
+""" + + +from __future__ import annotations + +from collections.abc import Iterable + +import numpy as np +import pandas as pd +from plotly.graph_objects import Figure +from qibo import gates +from qibo.models import Circuit +from qibo.noise import NoiseModel + +import qibocal.calibrations.niGSC.basics.fitting as fitting_methods +from qibocal.calibrations.niGSC.basics.circuitfactory import SingleCliffordsFactory +from qibocal.calibrations.niGSC.basics.experiment import Experiment +from qibocal.calibrations.niGSC.basics.plot import Report, scatter_fit_fig +from qibocal.calibrations.niGSC.basics.utils import gate_fidelity + + +class ModuleFactory(SingleCliffordsFactory): + def __init__(self, nqubits: int, depths: list, qubits: list = []) -> None: + super().__init__(nqubits, depths, qubits) + self.name = "SingleCliffordsInv" + + def build_circuit(self, depth: int) -> Circuit: + """Overwrites parent method. Add an inverse gate before the measurement. + + Args: + depth (int): How many gate layers. + + Returns: + (Circuit): A circuit with single qubit Clifford gates with ``depth`` many layers + and an inverse gate before the measurement gate. + """ + + # Initiate a ``Circuit`` object with as many qubits as is indicated with the list + # of qubits on which the gates should act on. + circuit = Circuit(len(self.qubits)) + # Add ``depth`` many gate layers. + for _ in range(depth): + circuit.add(self.gate_layer()) + # If there is at least one gate in the circuit, add an inverse. + if depth > 0: + # Build a gate out of the unitary of the whole circuit and + # take the daggered version of that. 
+ circuit.add( + gates.Unitary(circuit.unitary(), *range(len(self.qubits))).dagger() + ) + circuit.add(gates.M(*range(len(self.qubits)))) + return circuit + + +class ModuleExperiment(Experiment): + def __init__( + self, + circuitfactory: Iterable, + data: Iterable | None = None, + nshots: int | None = None, + noise_model: NoiseModel = None, + ) -> None: + """Calls the parent method, sets name. + + Args: + circuitfactory (Iterable): Gives a certain amount of circuits when + iterated over. + nshots (int): For execution of circuit, indicates how many shots. + data (Iterable): If filled, ``data`` can be used to specifying parameters + while executing a circuit or deciding how to process results. + It is used to store all relevant data. + """ + super().__init__(circuitfactory, data, nshots, noise_model) + self.name = "StandardRB" + + def execute(self, circuit: Circuit, datarow: dict) -> dict: + """Overwrites parent class method. Executes a circuit, adds the single shot results + and depth of the circuit to the data row. + + Args: + circuit (Circuit): Will be executed, has to return samples. + datarow (dict): Dictionary with parameters for execution and + immediate postprocessing information. + + Returns: + datarow (dict): + """ + + # Execute parent class method. + datarow = super().execute(circuit, datarow) + # Substract 1 for sequence length to not count the inverse gate and + # substract the measurement gate. + datarow["depth"] = (circuit.depth - 2) if circuit.depth > 1 else 0 + return datarow + + +class moduleReport(Report): + def __init__(self) -> None: + super().__init__() + self.title = "Standard Randomized Benchmarking" + + +def groundstate_probabilities(circuit: Circuit, datarow: dict) -> dict: + """Calculates the ground state probability with data from single shot measurements. + + Args: + circuit (Circuit): Not needed here. + datarow (dict): The dictionary holding the samples. + + Returns: + dict: The updated dictionary. 
+ """ + + # Get the samples data from the dictionary + samples = datarow["samples"] + # Create the ground state as it would look like in a single shot measurement. + ground = np.array([0] * len(samples[0])) + # Calculate the probability of the samples being in the ground state + # by counting the number of samples that are equal to the ground state + # and dividing it by the total number of samples. + datarow["groundstate probability"] = np.sum( + np.product(samples == ground, axis=1) + ) / len(samples) + # Return the updated dictionary. + return datarow + + +def post_processing_sequential(experiment: Experiment): + """Perform sequential tasks needed to analyze the experiment results. + + The data is added/changed in the experiment, nothing has to be returned. + + Args: + experiment (Experiment): Experiment object after execution of the experiment itself. + """ + + # Compute and add the ground state probabilities row by row. + experiment.perform(groundstate_probabilities) + + +def get_aggregational_data(experiment: Experiment) -> pd.DataFrame: + """Computes aggregational tasks, fits data and stores the results in a data frame. + + No data is manipulated in the ``experiment`` object. + + Args: + experiment (Experiment): After sequential postprocessing of the experiment data. + + Returns: + pd.DataFrame: The summarized data. + """ + + # Has to fit the column description from ``groundstate_probabilities``. + depths, ydata = experiment.extract("groundstate probability", "depth", "mean") + _, ydata_std = experiment.extract("groundstate probability", "depth", "std") + # Fit the ground state probabilies mean for each depth. + popt, perr = fitting_methods.fit_exp1B_func(depths, ydata) + # Build a list of dictionaries with the aggregational information. + data = [ + { + "depth": depths, # The x-axis. + "data": ydata, # The mean of ground state probability for each depth. + "2sigma": 2 * ydata_std, # The 2 * standard deviation error for each depth. 
+ "fit_func": "exp1B_func", # Which function was used to fit. + "popt": { + "A": popt[0], + "p": popt[1], + "B": popt[2], + }, # The fitting parameters. + "perr": { + "A_err": perr[0], + "p_err": perr[1], + "B_err": perr[2], + }, # The estimated errors. + } + ] + # The row name will be displayed as y-axis label. + df = pd.DataFrame(data, index=["groundstate probability"]) + return df + + +def build_report(experiment: Experiment, df_aggr: pd.DataFrame) -> Figure: + """Use data and information from ``experiment`` and the aggregated data frame to + build a report as plotly figure. + + Args: + experiment (Experiment): After sequential postprocessing of the experiment data. + df_aggr (pd.DataFrame): Normally built with ``get_aggregational_data`` function. + + Returns: + (Figure): A plotly.graphical_object.Figure object. + """ + + # Initiate a report object. + report = moduleReport() + # Add general information to the object. + report.info_dict["Number of qubits"] = len(experiment.data[0]["samples"][0]) + report.info_dict["Number of shots"] = len(experiment.data[0]["samples"]) + report.info_dict["runs"] = experiment.extract("samples", "depth", "count")[1][0] + report.info_dict["Fitting daviations"] = "".join( + [ + "{}:{:.3f} ".format(key, df_aggr.iloc[0]["perr"][key]) + for key in df_aggr.iloc[0]["perr"] + ] + ) + report.info_dict["Gate fidelity"] = "{:.4f}".format( + gate_fidelity(df_aggr.iloc[0]["popt"]["p"]) + ) + report.info_dict["Gate fidelity primitive"] = "{:.4f}".format( + gate_fidelity(df_aggr.iloc[0]["popt"]["p"], primitive=True) + ) + # Use the predefined ``scatter_fit_fig`` function from ``basics.plot`` to build the wanted + # plotly figure with the scattered ground state probability data along with the mean for + # each depth and the exponential fit for the means.
+ report.all_figures.append( + scatter_fit_fig(experiment, df_aggr, "depth", "groundstate probability") + ) + # Return the figure the report object builds out of all figures added to the report. + return report.build() diff --git a/src/qibocal/calibrations/protocols/test.py b/src/qibocal/calibrations/protocols/test.py deleted file mode 100644 index dfec71788..000000000 --- a/src/qibocal/calibrations/protocols/test.py +++ /dev/null @@ -1,22 +0,0 @@ -from qibo import gates, models - -from qibocal.data import Data - - -def test( - platform, - qubit: list, - nshots, - points=1, -): - data = Data("test", quantities=["nshots", "probabilities"]) - nqubits = len(qubit) - circuit = models.Circuit(nqubits) - circuit.add(gates.H(qubit[0])) - circuit.add(gates.H(qubit[1])) - # circuit.add(gates.H(1)) - circuit.add(gates.M(*qubit)) - execution = circuit(nshots=nshots) - - data.add({"nshots": nshots, "probabilities": execution.probabilities()}) - yield data diff --git a/src/qibocal/cli/builders.py b/src/qibocal/cli/builders.py index 37e2ef1be..3609ec8f3 100644 --- a/src/qibocal/cli/builders.py +++ b/src/qibocal/cli/builders.py @@ -1,4 +1,5 @@ import datetime +import importlib import inspect import os import shutil @@ -17,6 +18,120 @@ def load_yaml(path): return data +class ActionParser: + """Class for parsing and executing single actions in the runcard.""" + + def __init__(self, runcard, folder, name): + self.runcard = runcard + self.folder = folder + self.func = None + self.params = None + self.name = name + self.path = os.path.join(self.folder, f"data/{self.name}/") + + # FIXME: dummy fix + self.__name__ = name + + def build(self): + """Load function from :func:`qibocal.characterization.calibrations` and check arguments""" + if not os.path.exists(self.path): + os.makedirs(self.path) + # collect function from module + self.func = getattr(calibrations, self.name) + + sig = inspect.signature(self.func) + self.params = self.runcard["actions"][self.name] + for param in 
list(sig.parameters)[2:-1]: + if param not in self.params: + raise_error(AttributeError, f"Missing parameter {param} in runcard.") + + def execute(self, data_format, platform): + """Execute action and retrieve results.""" + if data_format is None: + raise_error(ValueError, f"Cannot store data using {data_format} format.") + + results = self.func(platform, self.runcard["qubits"], **self.params) + + for data in results: + getattr(data, f"to_{data_format}")(self.path) + + +class niGSCactionParser(ActionParser): + """ni = non interactive + GSC = gate set characterization + """ + + def __init__(self, runcard, folder, name): + super().__init__(runcard, folder, name) + + self.plots = [] + + self.nqubits = self.runcard["actions"][self.name]["nqubits"] + self.depths = self.runcard["actions"][self.name]["depths"] + self.runs = self.runcard["actions"][self.name]["runs"] + self.nshots = self.runcard["actions"][self.name]["nshots"] + + from qibocal.calibrations.niGSC.basics import noisemodels + + try: + self.noise_params = self.runcard["actions"][self.name]["noise_params"] + except KeyError: + self.noise_params = None + try: + self.noise_model = getattr( + noisemodels, self.runcard["actions"][self.name]["noise_model"] + )(*self.noise_params) + except: + self.noise_model = None + + def load_plot(self): + """Helper method to import the plotting function.""" + from qibocal.calibrations.niGSC.basics.plot import plot_qq + + self.plots.append((f"{self.name} protocol", plot_qq)) + + def build(self): + """Load appropriate module to run the experiment.""" + if not os.path.exists(self.path): + os.makedirs(self.path) + + self.module = importlib.import_module(f"qibocal.calibrations.niGSC.{self.name}") + + def execute(self, data_format, platform): + """Executes a non-interactive gate set characterization using only the wanted + module name. + + 1. Build the circuit factory. + 2. Build the experiment object with the circuit factory. + 3.
Execute the circuits generated by the circuit factory and store the wanted results. + 4. Post process the data. + 5. Store the experiment, if needed the circuit factory, and the aggregated data. + + Args: + data_format: Data storage format (not used in this implementation). + platform: Platform object (not used in this implementation). + """ + + # Initiate the factory and the experiment. + factory = self.module.ModuleFactory( + self.nqubits, self.depths * self.runs, qubits=self.runcard["qubits"] + ) + experiment = self.module.ModuleExperiment( + factory, nshots=self.nshots, noise_model=self.noise_model + ) + # Execute the circuits in the experiment. + experiment.perform(experiment.execute) + # Run the row by row postprocessing. + self.module.post_processing_sequential(experiment) + # Run aggregational tasks along with fitting. + # This will return a data frame, store it right away. + self.module.get_aggregational_data(experiment).to_pickle( + f"{self.path}/fit_plot.pkl" + ) + # Store the experiment. + experiment.save(self.path) + + +class ActionBuilder: + """Class for parsing and executing runcards.
Args: @@ -35,11 +150,14 @@ def __init__(self, runcard, folder=None, force=False): self.backend, self.platform = self._allocate_backend( backend_name, platform_name, path, platform_runcard ) - self.qubits = { - q: self.platform.qubits[q] - for q in self.runcard["qubits"] - if q in self.platform.qubits - } + if self.platform is not None: + self.qubits = { + q: self.platform.qubits[q] + for q in self.runcard["qubits"] + if q in self.platform.qubits + } + else: + self.qubits = self.runcard.get("qubits") self.format = self.runcard["format"] # Saving runcard @@ -121,19 +239,6 @@ def save_meta(self, path, folder): with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) - def _build_single_action(self, name): - """Helper method to parse the actions in the runcard.""" - f = getattr(calibrations, name) - path = os.path.join(self.folder, f"data/{name}/") - os.makedirs(path) - sig = inspect.signature(f) - params = self.runcard["actions"][name] - for param in list(sig.parameters)[2:-1]: - if param not in params: - raise_error(AttributeError, f"Missing parameter {param} in runcard.") - - return f, params, path - def execute(self): """Method to execute sequentially all the actions in the runcard.""" if self.platform is not None: @@ -144,27 +249,24 @@ def execute(self): actions = [] for action in self.runcard["actions"]: actions.append(action) - routine, args, path = self._build_single_action(action) - self._execute_single_action(routine, args, path) - for qubit in self.qubits: - if self.platform is not None: - self.update_platform_runcard(qubit, action) + try: + parser = niGSCactionParser(self.runcard, self.folder, action) + parser.build() + parser.execute(self.format, self.platform) + # TODO: find a better way to choose between the two parsers + except (ModuleNotFoundError, KeyError): + parser = ActionParser(self.runcard, self.folder, action) + parser.build() + parser.execute(self.format, self.platform) + for qubit in self.qubits: + if self.platform is not None: + 
self.update_platform_runcard(qubit, action) self.dump_report(actions) if self.platform is not None: self.platform.stop() self.platform.disconnect() - def _execute_single_action(self, routine, arguments, path): - """Method to execute a single action and retrieving the results.""" - if self.format is None: - raise_error(ValueError, f"Cannot store data using {self.format} format.") - - results = routine(self.platform, self.qubits, **arguments) - - for data in results: - getattr(data, f"to_{self.format}")(path) - def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data(self.folder, "data", routine, self.format, "fits") @@ -226,9 +328,13 @@ def __init__(self, path, actions=None): self.routines = [] if actions is None: actions = self.runcard.get("actions") + for action in actions: if hasattr(calibrations, action): routine = getattr(calibrations, action) + elif hasattr(calibrations.niGSC, action): + routine = niGSCactionParser(self.runcard, self.path, action) + routine.load_plot() else: raise_error(ValueError, f"Undefined action {action} in report.") @@ -251,7 +357,7 @@ def get_figure(self, routine, method, qubit): import tempfile figures = method(self.path, routine.__name__, qubit, self.format) - with tempfile.NamedTemporaryFile() as temp: + with tempfile.NamedTemporaryFile(delete=False) as temp: for figure in figures: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") diff --git a/src/qibocal/tests/niGSC.yml b/src/qibocal/tests/niGSC.yml new file mode 100644 index 000000000..ec0525fb4 --- /dev/null +++ b/src/qibocal/tests/niGSC.yml @@ -0,0 +1,28 @@ +backend: numpy + +qubits: [0] + +format: pickle + +actions: + standardrb: + nqubits: 1 + depths: [1,3,5,7,10] + runs: 2 + nshots: 1024 + noise_model: PauliErrorOnUnitary + noise_params: [0.01, 0.01, 0.01] + XIdrb: + nqubits: 1 + depths: [1,2,3,4,5,6,7,8,9,10] + runs: 5 + nshots: 10 + noise_model: PauliErrorOnX + noise_params: [0.05, 0.01, 0.01] 
+ simulfilteredrb: + nqubits: 1 + depths: [1,3,5,7,10] + runs: 2 + nshots: 1024 + noise_model: PauliErrorOnUnitary + noise_params: [0.01, 0.01, 0.01] diff --git a/src/qibocal/tests/test_niGSC_circuitfactory.py b/src/qibocal/tests/test_niGSC_circuitfactory.py new file mode 100644 index 000000000..a428f2953 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_circuitfactory.py @@ -0,0 +1,141 @@ +from collections.abc import Iterable +from copy import deepcopy + +import numpy as np +import pytest +from qibo import gates, models + +from qibocal.calibrations.niGSC.basics.circuitfactory import * +from qibocal.calibrations.niGSC.basics.utils import ONEQ_GATES +from qibocal.calibrations.niGSC.standardrb import ( + ModuleFactory as SingleCliffordsInvFactory, +) +from qibocal.calibrations.niGSC.XIdrb import ModuleFactory as XIdFactory + + +@pytest.fixture +def factories_singlequbitgates(): + thelist = [ + SingleCliffordsFactory, + Qibo1qGatesFactory, + SingleCliffordsInvFactory, + XIdFactory, + ] + return thelist + + +@pytest.fixture +def depths(): + return [0, 1, 5, 10, 30] + + +def abstract_factorytest(gfactory): + # The factory is an iterable. + assert isinstance(gfactory, Iterable) + # The objects it produces are of the type ``models.Circuit``. + for circuit in gfactory: + assert isinstance(circuit, models.Circuit) + + +def general_circuittest(gfactory: CircuitFactory): + """Check if the circuits produced by the given factory are + kind of random. + + Args: + gfactory (CircuitFactory): Produces circuits which are checked. 
+ """ + factory1 = deepcopy(gfactory) + factory2 = deepcopy(gfactory) + same_count = 0 + count = 0 + for circuit1, circuit2 in zip(factory1, factory2): + same_circuit = True + for gate1, gate2 in zip(circuit1.queue[:-1], circuit2.queue[:-1]): + same_circuit *= np.array_equal(gate1.matrix, gate2.matrix) + same_count += same_circuit + count += 1 + # Half of the runs should not give the same + assert same_count <= count * 0.5 + + +def test_abstract_factory(): + cfactory = CircuitFactory(1, [1, 2] * 3, qubits=[0]) + with pytest.raises(NotImplementedError): + for circuit in cfactory: + print(circuit.draw()) + cfactory = CircuitFactory(1, 3, qubits=[0]) + assert cfactory.depths == [3] + + +@pytest.mark.parametrize("nqubits", [1, 2, 5]) +@pytest.mark.parametrize("runs", [1, 4]) +@pytest.mark.parametrize("qubits", [[0], [0, 2]]) +def test_general_singlequbitgates_factories( + factories_singlequbitgates: list, + nqubits: int, + qubits: list, + depths: list, + runs: int, +) -> None: + """Check for how random circuits are produced and if the lengths, shape + and randomness works. + + Args: + qubits (list): List of qubits + depths (list): list of depths for circuits + runs (int): How many randomly drawn cirucit for one depth value + + """ + if max(qubits) >= nqubits: + pass + else: + for factory_init in factories_singlequbitgates: + # XId factory is only defined for 1 qubit. 
+ if max(qubits) > 0 and factory_init == XIdFactory: + with pytest.raises(ValueError): + factory = factory_init(nqubits, list(depths) * runs, qubits=qubits) + else: + factory = factory_init(nqubits, list(depths) * runs, qubits=qubits) + abstract_factorytest(factory) + # if factory.name not in ('XId', 'SingleCliffordsInv'): + general_circuittest(factory) + if "inv" in factory.name or "Inv" in factory.name: + # When checking the depth of circuits, the measurement gate and inverse gate + # has to be taken into account + additional_gates = 2 + else: + # When checking the depth of circuits, measurement gate has to be taken into account + additional_gates = 1 + for count, circuit in enumerate(factory): + if circuit.ngates == 1: + assert isinstance(circuit.queue[0], gates.measurements.M) + else: + assert ( + circuit.ngates + == depths[count % len(depths)] * len(qubits) + + additional_gates + ) + assert ( + circuit.depth + == depths[count % len(depths)] + additional_gates + ) + # Check the factories individual trades. 
+ if factory.name in ("Qibo1qGates"): + for gate in circuit.queue[:-1]: + assert gate.__class__.__name__ in ONEQ_GATES + elif factory.name in ("SingleCliffords"): + for gate in circuit.queue[:-1]: + assert isinstance(gate, gates.Unitary) + elif factory.name in ("SingleCliffordsInv"): + for gate in circuit.queue[:-1]: + assert isinstance(gate, gates.Unitary) + elif factory.name in ("XId"): + for gate in circuit.queue[:-1]: + assert isinstance(gate, gates.X) or isinstance( + gate, gates.I + ) + else: + raise_error( + ValueError, + "Unknown circuitfactory :{}".format(factory.name), + ) diff --git a/src/qibocal/tests/test_niGSC_experiment.py b/src/qibocal/tests/test_niGSC_experiment.py new file mode 100644 index 000000000..a9a7749f8 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_experiment.py @@ -0,0 +1,188 @@ +import os +from shutil import rmtree + +import numpy as np +import pytest +from qibo.models import Circuit + +from qibocal.calibrations.niGSC.basics.circuitfactory import Qibo1qGatesFactory +from qibocal.calibrations.niGSC.basics.experiment import * +from qibocal.calibrations.niGSC.basics.noisemodels import PauliErrorOnX + + +@pytest.fixture +def depths(): + return [0, 1, 5, 10, 30] + + +def test_Experiment_init(): + cfactory = None + data = None + noise_model = None + # All None should work. 
+ experiment1 = Experiment(cfactory, data=data, noise_model=noise_model) + with pytest.raises(TypeError): + _ = Experiment(1) + _ = Experiment(None, 1) + _ = Experiment(None, None, True) + _ = Experiment(None, None, None, 1) + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [0, 1]]) +@pytest.mark.parametrize("nshots", [12, 23]) +def test_Experiment_execute( + nqubits: int, depths: list, runs: int, qubits: list, nshots: int +): + cfactory1 = Qibo1qGatesFactory(nqubits, depths * runs, qubits=qubits) + experiment1 = Experiment(cfactory1, nshots=nshots) + experiment1.perform(experiment1.execute) + assert experiment1.extract("samples").shape == ( + len(depths * runs), + nshots, + len(qubits), + ) + noise_model = PauliErrorOnX() + cfactory2 = Qibo1qGatesFactory(nqubits, depths * runs, qubits=qubits) + experiment2 = Experiment(cfactory2, nshots=nshots, noise_model=noise_model) + experiment2.perform(experiment2.execute) + assert experiment2.extract("samples").shape == ( + len(depths * runs), + nshots, + len(qubits), + ) + + +@pytest.mark.parametrize("nqubits", [1, 2]) +@pytest.mark.parametrize("runs", [1, 3]) +def test_Experiment_perform(nqubits: int, depths: list, runs: int): + def nocircuit_dict_empty(circuit: Circuit, datadict: dict): + datadict["nshots"] = np.random.randint(1, 10) + return datadict + + def nocirucuit_dict_filled(circuit: Circuit, datadict: dict): + datadict["depthstr"] = "".join((*datadict.keys(), str(datadict["depth"]))) + return datadict + + def circuit_nodata(circuit: Circuit, datadict: dict): + datadict["depth"] = circuit.depth + return datadict + + def circuit_data(circuit: Circuit, datadict: dict): + nshots = datadict["nshots"] + datadict["samples"] = circuit(nshots=nshots).samples() + return datadict + + cfactory1 = Qibo1qGatesFactory(nqubits, depths * runs) + checkdepths = np.array(depths * runs) + 1 + checkdepthswithstring = [f"depth{d}" for d in checkdepths] + 
experiment1 = Experiment(None) + with pytest.raises(ValueError): + experiment1.perform(circuit_nodata) + experiment1.circuitfactory = cfactory1 + experiment1.perform(circuit_nodata) + assert np.allclose(experiment1.dataframe.values.flatten(), checkdepths) + experiment1.perform(nocirucuit_dict_filled) + assert np.array_equal(experiment1.dataframe.values[:, 0].flatten(), checkdepths) + assert np.array_equal( + experiment1.dataframe.values[:, 1].flatten(), checkdepthswithstring + ) + assert experiment1.dataframe.columns[0] == "depth" + assert experiment1.dataframe.columns[1] == "depthstr" + cfactory2 = Qibo1qGatesFactory(nqubits, depths * runs) + experiment2 = Experiment(cfactory2) + experiment2.perform(nocircuit_dict_empty) + assert experiment2.dataframe.columns[0] == "nshots" + experiment2.perform(circuit_data) + assert experiment2.dataframe.columns[0] == "nshots" + assert experiment2.dataframe.columns[1] == "samples" + for item in experiment2.data: + assert len(item["samples"]) == item["nshots"] + assert isinstance(item["samples"], np.ndarray) + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [0, 1]]) +def test_Experiment_save_load(nqubits: int, depths: list, runs: int, qubits: list): + cfactory1 = Qibo1qGatesFactory(nqubits, depths * runs, qubits=qubits) + experiment1 = Experiment(cfactory1) + experiment1.perform(experiment1.execute) + path1 = experiment1.save() + experiment2 = Experiment.load(path1) + for datarow1, datarow2 in zip(experiment1.data, experiment2.data): + assert np.array_equal(datarow1["samples"], datarow2["samples"]) + assert experiment2.circuitfactory is None + + cfactory3 = Qibo1qGatesFactory(nqubits, depths * runs, qubits=qubits) + experiment3 = Experiment(cfactory3) + experiment3.prebuild() + path3 = experiment3.save() + experiment4 = Experiment.load(path3) + for circuit3, circuit4 in zip( + experiment3.circuitfactory, experiment4.circuitfactory + ): + assert 
np.array_equal(circuit3.unitary(), circuit4.unitary()) + + cfactory5 = Qibo1qGatesFactory(nqubits, depths * runs, qubits=qubits) + experiment5 = Experiment(cfactory5) + experiment5.prebuild() + path5 = experiment5.save() + + experiment6 = Experiment.load(path5) + assert experiment6.data is None + for circuit5, circuit6 in zip( + experiment5.circuitfactory, experiment6.circuitfactory + ): + assert np.array_equal(circuit5.unitary(), circuit6.unitary()) + + rmtree(path1) + rmtree(path3) + rmtree(path5) + if len(os.listdir("experiments/rb")) == 0: + rmtree("experiments/rb") + if len(os.listdir("experiments")) == 0: + rmtree("experiments/") + + +@pytest.mark.parametrize("amount_data", [10, 17]) +def test_Experiment_extract(amount_data): + def populate_experiment_data(circuit: Circuit, datarow: dict) -> dict: + datarow["rand_uniform"] = np.random.uniform(0, 1) + datarow["rand_normal"] = np.random.uniform(0, 1) + datarow["rand_int"] = np.random.randint(0, 2) + 10 + return datarow + + data = [{} for _ in range(amount_data)] + experiment1 = Experiment(None, data=data) + experiment1.perform(populate_experiment_data) + # No group key and not agg_type. + randnormal_array = experiment1.extract("rand_normal") + # No groupkey but agg_type. + assert np.mean(randnormal_array) == experiment1.extract("rand_normal", "", "mean") + # No agg_type but group key. 
+ rand_ints, rand_uniforms = experiment1.extract( + "rand_uniform", groupby_key="rand_int" + ) + indx_randint = np.argsort(experiment1.dataframe["rand_int"].to_numpy()) + randints_tocheck = experiment1.dataframe["rand_int"].to_numpy()[indx_randint] + assert np.array_equal(randints_tocheck, rand_ints) + assert len(np.unique(rand_ints)) == 2 + until = np.count_nonzero(randints_tocheck == rand_ints[0]) + a_bit_sorted_uniforms = experiment1.dataframe["rand_uniform"].to_numpy()[ + indx_randint + ] + each_randint_uniforms = [rand_uniforms[:until], rand_uniforms[until:]] + each_randint_uniforms_check = [ + a_bit_sorted_uniforms[:until], + a_bit_sorted_uniforms[until:], + ] + # Since they does not neccessarily have to be ordered the same way: + for array1, array2 in zip(each_randint_uniforms, each_randint_uniforms_check): + for element in array1: + assert element in array2 + # Group key and agg_type. + rand_ints, mean_uniforms = experiment1.extract("rand_uniform", "rand_int", "mean") + assert np.isclose(mean_uniforms[0], np.mean(each_randint_uniforms_check[0])) + assert np.isclose(mean_uniforms[1], np.mean(each_randint_uniforms_check[1])) diff --git a/src/qibocal/tests/test_niGSC_fitting.py b/src/qibocal/tests/test_niGSC_fitting.py new file mode 100644 index 000000000..66f9aa18d --- /dev/null +++ b/src/qibocal/tests/test_niGSC_fitting.py @@ -0,0 +1,98 @@ +import warnings + +import numpy as np +import pytest + +from qibocal.calibrations.niGSC.basics import fitting + + +def test_1expfitting(): + success = 0 + number_runs = 50 + for _ in range(number_runs): + x = np.sort(np.random.choice(np.linspace(0, 15, 50), size=20, replace=False)) + A, f, B = np.random.uniform(0.1, 0.99, size=3) + y = A * f**x + B + assert np.allclose(fitting.exp1B_func(x, A, f, B), y) + # Distort ``y`` a bit. 
+ y_dist = y + np.random.randn(len(y)) * 0.005 + popt, perr = fitting.fit_exp1B_func(x, y_dist) + success += np.all( + np.logical_or( + np.abs(np.array(popt) - [A, f, B]) < 2 * np.array(perr), + np.abs(np.array(popt) - [A, f, B]) < 0.01, + ) + ) + assert success >= number_runs * 0.8 + + success = 0 + number_runs = 50 + for _ in range(number_runs): + x = np.sort(np.random.choice(np.linspace(0, 15, 50), size=20, replace=False)) + A, f = np.random.uniform(0.1, 0.99, size=2) + y = A * f**x + # Distort ``y`` a bit. + y_dist = y + np.random.randn(len(y)) * 0.005 + popt, perr = fitting.fit_exp1_func(x, y_dist) + success += np.all( + np.logical_or( + np.abs(np.array(popt) - [A, f]) < 2 * np.array(perr), + np.abs(np.array(popt) - [A, f]) < 0.01, + ) + ) + assert success >= number_runs * 0.8 + + x = np.sort(np.random.choice(np.linspace(-5, 5, 50), size=20, replace=False)) + y = np.zeros(len(x)) + 0.75 + assert np.array_equal( + np.array(fitting.fit_exp1B_func(x, y)), np.array(((0.75, 1.0, 0), (0, 0, 0))) + ) + assert np.array_equal( + np.array(fitting.fit_exp1_func(x, y)), np.array(((0.75, 1.0), (0, 0))) + ) + # At least once the algorithm shall not find a fit: + didnt_getit = 0 + didnt_getitB = 0 + for _ in range(20): + x = np.sort(np.random.choice(np.linspace(0, 15, 50), size=50, replace=False)) + y_dist = np.e ** (-((x - 5) ** 2) * 10) + np.random.randn(len(x)) * 0.1 + popt1, perr1 = fitting.fit_exp1_func(x, y_dist, p0=[-100]) + didnt_getit += not (np.all(np.array([*popt1, *perr1]), 0)) + popt, perr = fitting.fit_exp1B_func(x, y_dist, p0=[-100, 0.01]) + didnt_getitB += not (np.all(np.array([*popt, *perr]), 0)) + assert didnt_getit >= 1 and didnt_getitB >= 1 + + +def test_exp2_fitting(): + success = 0 + number_runs = 50 + for count in range(number_runs): + x = np.arange(0, 50) + A1, A2 = np.random.uniform(0.1, 0.99, size=2) + if not count % 3: + f1, f2 = np.random.uniform(0.1, 0.5, size=2) * 1j + np.random.uniform( + 0.1, 0.99, size=2 + ) + else: + f1, f2 = 
np.random.uniform(0.1, 0.99, size=2) + y = A1 * f1**x + A2 * f2**x + assert np.allclose(fitting.exp2_func(x, A1, A2, f1, f2), y) + # Distort ``y`` a bit. + y_dist = y + np.random.uniform(-1, 1, size=len(y)) * 0.001 + popt, perr = fitting.fit_exp2_func(x, y_dist) + success += np.all( + np.logical_or( + np.allclose(np.array(popt), [A2, A1, f2, f1], atol=0.05, rtol=0.1), + np.allclose(np.array(popt), [A1, A2, f1, f2], atol=0.05, rtol=0.1), + ) + ) + # This is a pretty bad rate. The ESPRIT algorithm has to be optimized. + assert success >= number_runs * 0.4 + + with pytest.raises(ValueError): + x = np.array([1, 2, 3, 5]) + A1, A2, f1, f2 = np.random.uniform(0.1, 0.99, size=4) + y = A1 * f1**x + A2 * f2**x + # Distort ``y`` a bit. + y_dist = y + np.random.uniform(-1, 1, size=len(y)) * 0.001 + popt, perr = fitting.fit_exp2_func(x, y_dist) diff --git a/src/qibocal/tests/test_niGSC_noisemodels.py b/src/qibocal/tests/test_niGSC_noisemodels.py new file mode 100644 index 000000000..18303fd25 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_noisemodels.py @@ -0,0 +1,43 @@ +import numpy as np +import pytest +import qibo + +from qibocal.calibrations.niGSC.basics import noisemodels + + +def test_PauliErrorOnUnitary(): + def test_model(pauli_onU_error): + assert isinstance(pauli_onU_error, qibo.noise.NoiseModel) + errorkeys = pauli_onU_error.errors.keys() + assert len(errorkeys) == 1 and list(errorkeys)[0] == qibo.gates.gates.Unitary + error = pauli_onU_error.errors[qibo.gates.gates.Unitary][0] + assert isinstance(error, qibo.noise.PauliError) + assert len(error.options) == 3 and np.sum(error.options) < 1 + + noise_model1 = noisemodels.PauliErrorOnUnitary() + test_model(noise_model1) + noise_model2 = noisemodels.PauliErrorOnUnitary(0.1, 0.1, 0.1) + test_model(noise_model2) + noise_model3 = noisemodels.PauliErrorOnUnitary(None) + test_model(noise_model3) + with pytest.raises(ValueError): + noise_model4 = noisemodels.PauliErrorOnUnitary(0.1, 0.2) + + +def test_PauliErrorOnX(): + def 
test_model(pauli_onX_error): + assert isinstance(pauli_onX_error, qibo.noise.NoiseModel) + errorkeys = pauli_onX_error.errors.keys() + assert len(errorkeys) == 1 and list(errorkeys)[0] == qibo.gates.gates.X + error = pauli_onX_error.errors[qibo.gates.gates.X][0] + assert isinstance(error, qibo.noise.PauliError) + assert len(error.options) == 3 and np.sum(error.options) < 1 + + noise_model1 = noisemodels.PauliErrorOnX() + test_model(noise_model1) + noise_model2 = noisemodels.PauliErrorOnX(0.1, 0.1, 0.1) + test_model(noise_model2) + noise_model3 = noisemodels.PauliErrorOnX(None) + test_model(noise_model3) + with pytest.raises(ValueError): + noise_model4 = noisemodels.PauliErrorOnX(0.1, 0.2) diff --git a/src/qibocal/tests/test_niGSC_qq.py b/src/qibocal/tests/test_niGSC_qq.py new file mode 100644 index 000000000..9829637a8 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_qq.py @@ -0,0 +1,33 @@ +import os +from shutil import rmtree + +from qibocal.cli._base import ActionBuilder + + +def test_command_niGSC(): + path_to_runcard = "src/qibocal/tests/niGSC.yml" + test_folder = "test_and_delete/" + builder = ActionBuilder(path_to_runcard, test_folder, force=None) + builder.execute() + builder.dump_report() + paths_to_protocols = [ + "data/simulfilteredrb/", + "data/standardrb/", + "data/XIdrb/", + ] + paths_to_check = [f"{test_folder}{path}" for path in paths_to_protocols] + for path in paths_to_check: + assert os.path.isdir(path) + inside = ["experiment_data.pkl", "fit_plot.pkl"] + files_to_check = [[f"{path}{name}" for path in paths_to_check] for name in inside] + for filename_list in files_to_check: + for filename in filename_list: + assert os.path.isfile(filename) + # The circuits data is only stored with correlated rb. 
+ assert os.path.isfile(f"{test_folder}/data/simulfilteredrb/circuits.pkl") + assert not os.path.isfile(f"{test_folder}/data/standardrb/circuits.pkl") + assert not os.path.isfile(f"{test_folder}/data/XIdrb/circuits.pkl") + assert os.path.isfile(f"{test_folder}index.html") + assert os.path.isfile(f"{test_folder}meta.yml") + assert os.path.isfile(f"{test_folder}runcard.yml") + rmtree(test_folder) diff --git a/src/qibocal/tests/test_niGSC_simulfilteredrb.py b/src/qibocal/tests/test_niGSC_simulfilteredrb.py new file mode 100644 index 000000000..50bfe61d3 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_simulfilteredrb.py @@ -0,0 +1,202 @@ +import numpy as np +import pandas as pd +import pytest +from plotly.graph_objects import Figure +from qibo import gates, models + +from qibocal.calibrations.niGSC import simulfilteredrb +from qibocal.calibrations.niGSC.basics import noisemodels, utils + + +@pytest.fixture +def depths(): + return [0, 1, 5, 10, 30] + + +@pytest.fixture +def nshots(): + return 13 + + +@pytest.mark.parametrize("nqubits", [1, 2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [0, 1]]) +def test_experiment(nqubits: int, depths: list, runs: int, nshots: int, qubits: list): + byaccident_zeros = 0 + if max(qubits) > nqubits - 1: + pass + else: + myfactory1 = simulfilteredrb.ModuleFactory(nqubits, depths * runs) + myexperiment1 = simulfilteredrb.ModuleExperiment(myfactory1, nshots=nshots) + myexperiment1.perform(myexperiment1.execute) + assert isinstance(myexperiment1.data, list) + assert isinstance(myexperiment1.data[0], dict) + for count, datarow in enumerate(myexperiment1.data): + assert len(datarow.keys()) == 2 + assert isinstance(datarow["samples"], np.ndarray) + assert len(datarow["samples"]) == nshots + assert isinstance(datarow["depth"], int) + assert datarow["depth"] == depths[count % len(depths)] + if not datarow["depth"]: + assert np.array_equal( + datarow["samples"], np.zeros(datarow["samples"].shape) + ) + 
else: + byaccident_zeros += np.array_equal( + datarow["samples"], np.zeros(datarow["samples"].shape) + ) + assert byaccident_zeros < len(depths) * runs / 2 + assert isinstance(myexperiment1.dataframe, pd.DataFrame) + + +@pytest.mark.parametrize("nqubits", [1, 3]) +@pytest.mark.parametrize("noise_params", [[0.1, 0.2, 0.1], [0.03, 0.17, 0.05]]) +def test_experiment_withnoise(nqubits: int, noise_params): + nshots = 512 + depths = [0, 5, 10] + runs = 2 + # Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. + myfactory1 = simulfilteredrb.ModuleFactory(nqubits, depths * runs) + circuit_list = list(myfactory1) + myfaultyexperiment = simulfilteredrb.ModuleExperiment( + circuit_list, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + experiment1 = simulfilteredrb.ModuleExperiment(circuit_list, nshots=nshots) + experiment1.perform(experiment1.execute) + experiment12 = simulfilteredrb.ModuleExperiment(circuit_list, nshots=nshots) + experiment12.perform(experiment12.execute) + for datarow_faulty, datarow2, datarow3 in zip( + myfaultyexperiment.data, experiment1.data, experiment12.data + ): + if not datarow_faulty["depth"]: + assert np.array_equal(datarow_faulty["samples"], datarow2["samples"]) + assert np.array_equal(datarow2["samples"], datarow3["samples"]) + else: + probs1 = utils.probabilities(datarow_faulty["samples"]) + probs2 = utils.probabilities(datarow2["samples"]) + probs3 = utils.probabilities(datarow3["samples"]) + # Since the error channel maps the state into the maximally mixed state, + # if the state is already maximally mixed, they will not differ. + # So check if a probabiliy entry is zero. 
+ if np.any(probs2 == 0): + assert np.linalg.norm(probs1 - probs2) > np.linalg.norm(probs2 - probs3) + + +def test_filterfunction(): + """Test if the filter function works, without noise.""" + + nqubits = 2 + nshots = 3000 + d = 2 + # Steal the class method for calculating clifford unitaries. + from qibo.quantum_info.random_ensembles import random_clifford + + # The first parameter is self, set it to None since it is not needed. + g1_matrix = random_clifford(1) + g1 = gates.Unitary(g1_matrix, 0) + g2_matrix = random_clifford(1) + g2 = gates.Unitary(g2_matrix, 0) + g3_matrix = random_clifford(1) + g3 = gates.Unitary(g3_matrix, 1) + g4_matrix = random_clifford(1) + g4 = gates.Unitary(g4_matrix, 1) + # Calculate the ideal unitary and the ideal outcomes. + g21 = g2_matrix @ g1_matrix + g43 = g4_matrix @ g3_matrix + ideal1 = g21 @ np.array([[1], [0]]) + ideal2 = g43 @ np.array([[1], [0]]) + # Build the circuit with the ideal unitaries. + c = models.Circuit(nqubits) + c.add([g1, g3, g2, g4]) + c.add(gates.M(0, 1)) + # Execute the circuit and get the samples. + samples = c(nshots=nshots).samples() + # Initiate the variables to store the four irrep signals. + a0, a1, a2, a3 = 0, 0, 0, 0 + for s in samples: + # lambda = (0,0) + a0 += 1 / d**2 + # lambda = (0,1) + a1 += 1 / d * np.abs(ideal2[s[1]]) ** 2 - 1 / d**2 + # lambda = (1,0) + a2 += 1 / d * np.abs(ideal1[s[0]]) ** 2 - 1 / d**2 + # lambda = (1,1) + a3 += ( + np.abs(ideal1[s[0]]) ** 2 * np.abs(ideal2[s[1]]) ** 2 + - 1 / d * np.abs(ideal1[s[0]]) ** 2 + - 1 / d * np.abs(ideal2[s[1]]) ** 2 + + 1 / d**2 + ) + a0 *= 1 / (nshots) + a1 *= (d + 1) / (nshots) + a2 *= (d + 1) / (nshots) + a3 *= (d + 1) ** 2 / (nshots) + # Now do the same but with an experiment, use a list with only + # the prebuild circuit (build it again because it was already executed). + # No noise. 
+ c = models.Circuit(nqubits) + c.add([g1, g3, g2, g4]) + c.add(gates.M(0, 1)) + experiment = simulfilteredrb.ModuleExperiment([c], nshots=nshots) + experiment.perform(experiment.execute) + # Compute and get the filtered signals. + experiment.perform(simulfilteredrb.filter_function) + # Compare the above calculated filtered signals and the signals + # computed with the crosstalkrb method. + assert isinstance(experiment.data, list) + assert np.isclose(a0, experiment.data[0]["irrep0"]) + assert np.isclose(a1, experiment.data[0]["irrep1"]) + assert np.isclose(a2, experiment.data[0]["irrep2"]) + assert np.isclose(a3, experiment.data[0]["irrep3"]) + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("noise_params", [[0.1, 0.1, 0.1], [0.4, 0.2, 0.01]]) +def test_post_processing( + nqubits: int, depths: list, runs: int, nshots: int, noise_params: list +): + # Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. + myfactory1 = simulfilteredrb.ModuleFactory(nqubits, list(depths) * runs) + myfaultyexperiment = simulfilteredrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + simulfilteredrb.post_processing_sequential(myfaultyexperiment) + aggr_df = simulfilteredrb.get_aggregational_data(myfaultyexperiment) + assert ( + len(aggr_df) == 2**nqubits + and aggr_df.index[0] == "irrep0" + and aggr_df.index[1] == "irrep1" + ) + assert "depth" in aggr_df.columns + assert "data" in aggr_df.columns + assert "2sigma" in aggr_df.columns + assert "fit_func" in aggr_df.columns + assert "popt" in aggr_df.columns + assert "perr" in aggr_df.columns + + +def test_build_report(): + depths = [1, 5, 10, 15, 20, 25] + nshots = 128 + runs = 10 + nqubits = 1 + noise_params = [0.01, 0.1, 0.05] + # Build the noise model. 
+ noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. + myfactory1 = simulfilteredrb.ModuleFactory(nqubits, depths * runs) + myfaultyexperiment = simulfilteredrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + simulfilteredrb.post_processing_sequential(myfaultyexperiment) + aggr_df = simulfilteredrb.get_aggregational_data(myfaultyexperiment) + report_figure = simulfilteredrb.build_report(myfaultyexperiment, aggr_df) + assert isinstance(report_figure, Figure) diff --git a/src/qibocal/tests/test_niGSC_standardrb.py b/src/qibocal/tests/test_niGSC_standardrb.py new file mode 100644 index 000000000..0a5f93061 --- /dev/null +++ b/src/qibocal/tests/test_niGSC_standardrb.py @@ -0,0 +1,190 @@ +from itertools import product + +import numpy as np +import pandas as pd +import pytest +from plotly.graph_objects import Figure +from qibo import gates +from qibo.noise import NoiseModel + +from qibocal.calibrations.niGSC import standardrb +from qibocal.calibrations.niGSC.basics import noisemodels, utils + + +def theoretical_outcome(noise_model: NoiseModel) -> float: + """Take the used noise model acting on unitaries and calculates the + effective depolarizing parameter. + + Args: + experiment (Experiment): Experiment which executed the simulation. + noisemodel (NoiseModel): Applied noise model. + + Returns: + (float): The effective depolarizing parameter of given error. + """ + + # TODO This has to be more systematic. Delete it from the branch which will be merged. + # Check for correctness of noise model and gate independence. + errorkeys = noise_model.errors.keys() + assert len(errorkeys) == 1 and list(errorkeys)[0] == gates.Unitary + # Extract the noise acting on unitaries and turn it into the associated + # error channel. 
+ error = noise_model.errors[gates.Unitary][0] + errorchannel = error.channel(0, *error.options) + # Calculate the effective depolarizing parameter. + return utils.effective_depol(errorchannel) + + +@pytest.fixture +def depths(): + return [0, 1, 5, 10, 30] + + +@pytest.fixture +def nshots(): + return 13 + + +@pytest.mark.parametrize("nqubits", [1, 2]) +@pytest.mark.parametrize("runs", [1, 3]) +def test_experiment(nqubits: int, depths: list, runs: int, nshots: int): + # Test execute an experiment. + myfactory1 = standardrb.ModuleFactory(nqubits, list(depths) * runs) + myexperiment1 = standardrb.ModuleExperiment(myfactory1, nshots=nshots) + assert myexperiment1.name == "StandardRB" + myexperiment1.perform(myexperiment1.execute) + assert isinstance(myexperiment1.data, list) + assert isinstance(myexperiment1.data[0], dict) + for count, datarow in enumerate(myexperiment1.data): + assert len(datarow.keys()) == 2 + assert isinstance(datarow["samples"], np.ndarray) + assert len(datarow["samples"]) == nshots + assert isinstance(datarow["depth"], int) + assert datarow["depth"] == depths[count % len(depths)] + assert np.array_equal(datarow["samples"], np.zeros(datarow["samples"].shape)) + assert isinstance(myexperiment1.dataframe, pd.DataFrame) + + +@pytest.mark.parametrize("nqubits", [1, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("noise_params", [[0.1, 0.1, 0.1], [0.02, 0.3, 0.07]]) +def test_experiment_withnoise( + nqubits: int, depths: list, runs: int, nshots: int, noise_params: list +): + # Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. 
+ myfactory1 = standardrb.ModuleFactory(nqubits, list(depths) * runs) + myfaultyexperiment = standardrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + assert isinstance(myfaultyexperiment.data, list) + assert isinstance(myfaultyexperiment.data[0], dict) + for count, datarow in enumerate(myfaultyexperiment.data): + assert len(datarow.keys()) == 2 + assert isinstance(datarow["samples"], np.ndarray) + assert len(datarow["samples"]) == nshots + assert isinstance(datarow["depth"], int) + assert datarow["depth"] == depths[count % len(depths)] + # If there are no executed gates (other than the measurement) or only one + # the probability that no error occured is too high. + if datarow["depth"] > 2: + assert not np.array_equal( + datarow["samples"], np.zeros(datarow["samples"].shape) + ) + assert isinstance(myfaultyexperiment.dataframe, pd.DataFrame) + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [0, 1]]) +def test_embed_circuit(nqubits: int, depths: list, runs: int, qubits: list): + nshots = 2 + myfactory1 = standardrb.ModuleFactory(nqubits, list(depths) * runs, qubits=qubits) + test_list = list(product(qubits)) + test_list.append(tuple(qubits)) + for circuit in myfactory1: + assert circuit.nqubits == nqubits + for gate in circuit.queue: + assert gate._target_qubits in test_list + myexperiment1 = standardrb.ModuleExperiment(myfactory1, nshots=nshots) + myexperiment1.perform(myexperiment1.execute) + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +def test_utils_probs_and_noisy_execution( + nqubits: int, depths: list, runs: int, nshots: int +): + noise_params = [0.0001, 0.001, 0.0005] + # Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. 
+ myfactory1 = standardrb.ModuleFactory(nqubits, list(depths) * runs) + myfaultyexperiment = standardrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + myfaultyexperiment.perform(standardrb.groundstate_probabilities) + probs = utils.probabilities(myfaultyexperiment.extract("samples")) + assert probs.shape == (runs * len(depths), 2**nqubits) + assert np.allclose(np.sum(probs, axis=1), 1) + for probsarray in probs: + if probsarray[0] < 1.0: + assert np.all( + np.greater_equal(probsarray[0] * np.ones(len(probsarray)), probsarray) + ) + else: + assert probsarray[0] == 1.0 + + +@pytest.mark.parametrize("nqubits", [2, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +def test_post_processing(nqubits: int, depths: list, runs: int, nshots: int): + noise_params = [0.01, 0.3, 0.14] + # Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. + myfactory1 = standardrb.ModuleFactory(nqubits, list(depths) * runs) + myfaultyexperiment = standardrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + standardrb.post_processing_sequential(myfaultyexperiment) + probs = utils.probabilities(myfaultyexperiment.extract("samples")) + ground_probs = probs[:, 0] + test_ground_probs = myfaultyexperiment.extract("groundstate probability") + assert np.allclose(ground_probs, test_ground_probs) + aggr_df = standardrb.get_aggregational_data(myfaultyexperiment) + assert len(aggr_df) == 1 and aggr_df.index[0] == "groundstate probability" + assert "depth" in aggr_df.columns + assert "data" in aggr_df.columns + assert "2sigma" in aggr_df.columns + assert "fit_func" in aggr_df.columns + assert "popt" in aggr_df.columns + assert "perr" in aggr_df.columns + + +def test_build_report(): + depths = [1, 5, 10, 15, 20, 25] + nshots = 1024 + runs = 5 + nqubits = 1 + noise_params = [0.01, 0.1, 0.05] + # 
Build the noise model. + noise = noisemodels.PauliErrorOnUnitary(*noise_params) + # Test exectue an experiment. + myfactory1 = standardrb.ModuleFactory(nqubits, depths * runs) + myfaultyexperiment = standardrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + standardrb.post_processing_sequential(myfaultyexperiment) + aggr_df = standardrb.get_aggregational_data(myfaultyexperiment) + assert ( + theoretical_outcome(noise) - aggr_df.popt[0]["p"] + < 2 * aggr_df.perr[0]["p_err"] + theoretical_outcome(noise) * 0.01 + ) + report_figure = standardrb.build_report(myfaultyexperiment, aggr_df) + assert isinstance(report_figure, Figure) diff --git a/src/qibocal/tests/test_niGSC_xidrb.py b/src/qibocal/tests/test_niGSC_xidrb.py new file mode 100644 index 000000000..7b28169be --- /dev/null +++ b/src/qibocal/tests/test_niGSC_xidrb.py @@ -0,0 +1,138 @@ +import numpy as np +import pandas as pd +import pytest +from plotly.graph_objects import Figure +from qibo import gates +from qibo.noise import NoiseModel + +from qibocal.calibrations.niGSC import XIdrb +from qibocal.calibrations.niGSC.basics import noisemodels, utils + + +@pytest.fixture +def depths(): + return [0, 1, 2, 3, 4, 5] + + +@pytest.fixture +def nshots(): + return 27 + + +@pytest.mark.parametrize("nqubits", [1, 2]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [1]]) +def test_experiment(nqubits: int, depths: list, runs: int, nshots: int, qubits: list): + if max(qubits) > nqubits - 1: + qubits = [0, 1] + with pytest.raises(ValueError): + myfactory1 = XIdrb.ModuleFactory(2, list(depths) * runs, qubits) + else: + myfactory1 = XIdrb.ModuleFactory(nqubits, list(depths) * runs, qubits) + myexperiment1 = XIdrb.ModuleExperiment(myfactory1, nshots=nshots) + assert myexperiment1.name == "XIdRB" + myexperiment1.perform(myexperiment1.execute) + assert isinstance(myexperiment1.data, list) + assert 
isinstance(myexperiment1.data[0], dict) + for count, datarow in enumerate(myexperiment1.data): + assert len(datarow.keys()) == 3 + assert isinstance(datarow["samples"], np.ndarray) + assert len(datarow["samples"]) == nshots + assert isinstance(datarow["depth"], int) + assert datarow["depth"] == depths[count % len(depths)] + assert np.array_equal( + datarow["samples"], + np.zeros(datarow["samples"].shape) + datarow["countX"] % 2, + ) + assert isinstance(myexperiment1.dataframe, pd.DataFrame) + + +@pytest.mark.parametrize("nqubits", [1, 3]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("noise_params", [[0.1, 0.1, 0.1], [0.02, 0.3, 0.07]]) +@pytest.mark.parametrize("qubits", [[0], [1]]) +def test_experiment_withnoise( + nqubits: int, depths: list, runs: int, qubits: list, noise_params: list +): + nshots = 100 + if max(qubits) > nqubits - 1: + pass + else: + # Build the noise model. + noise = noisemodels.PauliErrorOnX(*noise_params) + # Test exectue an experiment. + myfactory1 = XIdrb.ModuleFactory(nqubits, list(depths) * runs, qubits) + myfaultyexperiment = XIdrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + assert isinstance(myfaultyexperiment.data, list) + assert isinstance(myfaultyexperiment.data[0], dict) + for count, datarow in enumerate(myfaultyexperiment.data): + assert len(datarow.keys()) == 3 + assert isinstance(datarow["samples"], np.ndarray) + assert len(datarow["samples"]) == nshots + assert isinstance(datarow["depth"], int) + assert datarow["depth"] == depths[count % len(depths)] + if not datarow["countX"]: + assert np.array_equal( + datarow["samples"], np.zeros(datarow["samples"].shape) + ) + else: + theor_outcome = datarow["countX"] % 2 + assert not np.array_equal( + datarow["samples"], + np.zeros(datarow["samples"].shape) + theor_outcome, + ) + assert isinstance(myfaultyexperiment.dataframe, pd.DataFrame) + + +@pytest.mark.parametrize("nqubits", [1, 
4]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [2]]) +def test_post_processing( + nqubits: int, depths: list, runs: int, nshots: int, qubits: list +): + if max(qubits) > nqubits - 1: + pass + else: + noise_params = [0.01, 0.3, 0.14] + # Build the noise model. + noise = noisemodels.PauliErrorOnX(*noise_params) + # Test exectue an experiment. + myfactory1 = XIdrb.ModuleFactory(nqubits, list(depths) * runs, qubits) + myfaultyexperiment = XIdrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + XIdrb.post_processing_sequential(myfaultyexperiment) + aggr_df = XIdrb.get_aggregational_data(myfaultyexperiment) + assert len(aggr_df) == 1 and aggr_df.index[0] == "filter" + assert "depth" in aggr_df.columns + assert "data" in aggr_df.columns + assert "2sigma" in aggr_df.columns + assert "fit_func" in aggr_df.columns + assert "popt" in aggr_df.columns + assert "perr" in aggr_df.columns + + +@pytest.mark.parametrize("nqubits", [1, 5]) +@pytest.mark.parametrize("runs", [1, 3]) +@pytest.mark.parametrize("qubits", [[0], [2]]) +def test_build_report(depths: list, nshots: int, nqubits: int, runs: int, qubits: list): + if max(qubits) > nqubits - 1: + pass + else: + noise_params = [0.01, 0.1, 0.05] + # Build the noise model. + noise = noisemodels.PauliErrorOnX(*noise_params) + # Test exectue an experiment. 
+ myfactory1 = XIdrb.ModuleFactory(nqubits, depths * runs, qubits) + myfaultyexperiment = XIdrb.ModuleExperiment( + myfactory1, nshots=nshots, noise_model=noise + ) + myfaultyexperiment.perform(myfaultyexperiment.execute) + XIdrb.post_processing_sequential(myfaultyexperiment) + aggr_df = XIdrb.get_aggregational_data(myfaultyexperiment) + report_figure = XIdrb.build_report(myfaultyexperiment, aggr_df) + assert isinstance(report_figure, Figure) diff --git a/src/qibocal/web/report.py b/src/qibocal/web/report.py index b5aedeb9e..063d96619 100644 --- a/src/qibocal/web/report.py +++ b/src/qibocal/web/report.py @@ -17,7 +17,6 @@ def create_report(path, actions=None): report = ReportBuilder(path, actions) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") - html = template.render( is_static=True, css_styles=css_styles,