diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 149e3b9b2..a2fdc2e19 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -37,7 +37,6 @@ pygsti/algorithms/compilers.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeep pygsti/algorithms/mirroring.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers pygsti/algorithms/randomcircuit.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers pygsti/algorithms/rbfit.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers -pygsti/extras/rb.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers # Should this just be deprecated and removed? pygsti/protocols/rb.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers pygsti/tools/rbtheory.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers pygsti/tools/rbtools.py @sandialabs/pygsti-rb @sandialabs/pygsti-gatekeepers diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index 6e0628264..ec94a3735 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -1149,8 +1149,6 @@ def find_closest_unitary_opmx(operation_mx): # d = _np.sqrt(operation_mx.shape[0]) # I = _np.identity(d) - #def getu_1q(basisVec): # 1 qubit version - # return _spl.expm( 1j * (basisVec[0]*_tools.sigmax + basisVec[1]*_tools.sigmay + basisVec[2]*_tools.sigmaz) ) def _get_gate_mx_1q(basis_vec): # 1 qubit version return _tools.single_qubit_gate(basis_vec[0], basis_vec[1], diff --git a/pygsti/algorithms/fiducialselection.py b/pygsti/algorithms/fiducialselection.py index 536db2847..6f75275ed 100644 --- a/pygsti/algorithms/fiducialselection.py +++ b/pygsti/algorithms/fiducialselection.py @@ -409,13 +409,6 @@ def final_result_test(final_fids, verb_printer): return prepFidList, measFidList -#def bool_list_to_ind_list(boolList): -# output = _np.array([]) -# for i, boolVal in boolList: -# if boolVal == 1: -# output = _np.append(i) -# return output - def xor(*args): """ Implements logical xor function for arbitrary number of inputs. diff --git a/pygsti/algorithms/germselection.py b/pygsti/algorithms/germselection.py index 48957b90e..94c156c01 100644 --- a/pygsti/algorithms/germselection.py +++ b/pygsti/algorithms/germselection.py @@ -3295,80 +3295,81 @@ def symmetric_low_rank_spectrum_update(update, orig_e, U, proj_U, force_rank_inc #return the new eigenvalues return new_evals, True -#Note: This function won't work for our purposes because of the assumptions -#about the rank of the update on the nullspace of the matrix we're updating, -#but keeping this here commented for future reference. -#Function for doing fast calculation of the updated inverse trace: -#def riedel_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=True): -# """ -# input: -# -# update : ndarray -# symmetric low-rank update to perform. -# This is the first half the symmetric rank decomposition s.t. -# update@update.T= the full update matrix. -# -# orig_e : ndarray -# Spectrum of the original matrix. This is a 1-D array. -# -# proj_U : ndarray -# Projector onto the complement of the column space of the -# original matrix's eigenvectors. -# -# output: -# -# trace : float -# Value of the trace of the updated psuedoinverse matrix. -# -# updated_rank : int -# total rank of the updated matrix. -# -# rank_increase_flag : bool -# a flag that is returned to indicate is a candidate germ failed to amplify additional parameters. -# This indicates things short circuited and so the scoring function should skip this germ. 
-# """
-#
-# #First we need to for the matrix P, whose column space
-# #forms an orthonormal basis for the component of update
-# #that is in the complement of U.
-#
-# proj_update= proj_U@update
-#
-# #Next take the RRQR decomposition of this matrix:
-# q_update, r_update, _ = _sla.qr(proj_update, mode='economic', pivoting=True)
-#
-# #Construct P by taking the columns of q_update corresponding to non-zero values of r_A on the diagonal.
-# nonzero_indices_update= _np.nonzero(_np.diag(r_update)>1e-10) #HARDCODED (threshold is hardcoded)
-#
-# #if the rank doesn't increase then we can't use the Riedel approach.
-# #Abort early and return a flag to indicate the rank did not increase.
-# if len(nonzero_indices_update[0])==0 and force_rank_increase:
-# return None, None, False
-#
-# P= q_update[: , nonzero_indices_update[0]]
-#
-# updated_rank= len(orig_e)+ len(nonzero_indices_update[0])
-#
-# #Now form the matrix R_update which is given by P.T @ proj_update.
-# R_update= P.T@proj_update
-#
-# #R_update gets concatenated with U.T@update to form
-# #a block column matrixblock_column= np.concatenate([U.T@update, R_update], axis=0)
-#
-# Uta= U.T@update
-#
-# try:
-# RRRDinv= R_update@_np.linalg.inv(R_update.T@R_update)
-# except _np.linalg.LinAlgError as err:
-# print('Numpy thinks this matrix is singular, condition number is: ', _np.linalg.cond(R_update.T@R_update))
-# print((R_update.T@R_update).shape)
-# raise err
-# pinv_orig_e_mat= _np.diag(1/orig_e)
-#
-# trace= _np.sum(1/orig_e) + _np.trace( RRRDinv@(_np.eye(Uta.shape[1]) + Uta.T@pinv_orig_e_mat@Uta)@RRRDinv.T )
-#
-# return trace, updated_rank, True
+# Note: The function below won't work for our purposes because of the assumptions
+# about the rank of the update on the nullspace of the matrix we're updating,
+# but keeping it here, commented out, for future reference.
+'''
+def riedel_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=True):
+    """
+    input:
+
+    update : ndarray
+        symmetric low-rank update to perform.
+        This is the first half of the symmetric rank decomposition s.t.
+        update@update.T= the full update matrix.
+
+    orig_e : ndarray
+        Spectrum of the original matrix. This is a 1-D array.
+
+    proj_U : ndarray
+        Projector onto the complement of the column space of the
+        original matrix's eigenvectors.
+
+    output:
+
+    trace : float
+        Value of the trace of the updated pseudoinverse matrix.
+
+    updated_rank : int
+        total rank of the updated matrix.
+
+    rank_increase_flag : bool
+        a flag that is returned to indicate whether a candidate germ failed to amplify additional parameters.
+        This indicates things short-circuited and so the scoring function should skip this germ.
+    """
+    #First we need to form the matrix P, whose column space
+    #forms an orthonormal basis for the component of update
+    #that is in the complement of U.
+
+    proj_update= proj_U@update
+
+    #Next take the RRQR decomposition of this matrix:
+    q_update, r_update, _ = _sla.qr(proj_update, mode='economic', pivoting=True)
+
+    #Construct P by taking the columns of q_update corresponding to non-zero values of r_A on the diagonal.
+    nonzero_indices_update= _np.nonzero(_np.diag(r_update)>1e-10) #HARDCODED (threshold is hardcoded)
+
+    #if the rank doesn't increase then we can't use the Riedel approach.
+    #Abort early and return a flag to indicate the rank did not increase.
+ if len(nonzero_indices_update[0])==0 and force_rank_increase: + return None, None, False + + P= q_update[: , nonzero_indices_update[0]] + + updated_rank= len(orig_e)+ len(nonzero_indices_update[0]) + + #Now form the matrix R_update which is given by P.T @ proj_update. + R_update= P.T@proj_update + + #R_update gets concatenated with U.T@update to form + #a block column matrixblock_column= np.concatenate([U.T@update, R_update], axis=0) + + Uta= U.T@update + + try: + RRRDinv= R_update@_np.linalg.inv(R_update.T@R_update) + except _np.linalg.LinAlgError as err: + print('Numpy thinks this matrix is singular, condition number is: ', _np.linalg.cond(R_update.T@R_update)) + print((R_update.T@R_update).shape) + raise err + pinv_orig_e_mat= _np.diag(1/orig_e) + + trace= _np.sum(1/orig_e) + _np.trace( RRRDinv@(_np.eye(Uta.shape[1]) + Uta.T@pinv_orig_e_mat@Uta)@RRRDinv.T ) + + return trace, updated_rank, True +''' + def minamide_style_inverse_trace(update, orig_e, U, proj_U, force_rank_increase=False): """ This function performs a low-rank update to the components of diff --git a/pygsti/algorithms/mirroring.py b/pygsti/algorithms/mirroring.py index 74dc2bcec..a6b4ee49d 100644 --- a/pygsti/algorithms/mirroring.py +++ b/pygsti/algorithms/mirroring.py @@ -21,92 +21,6 @@ from . import randomcircuit as _rc -# ### TODO: THIS IS TIMS OLD CODE WHICH SHOULD PERHAPS ALSO BE AN OPTION IN THE `CREATE_MIRROR_CIRCUIT` FUNCTION -# def create_mirror_circuit(circ, pspec, circtype='Clifford+Gzr', pauli_labels=None, pluspi_prob=0.): -# """ -# ***************************************************************** -# Function currently has the following limitations that need fixing: - -# - A layer contains only Clifford or Gzr gates on ALL the qubits. -# - all of the Clifford gates are self inverse -# - The qubits are labelled "Q0" through "Qn-1" -- THIS SHOULD NOW BE FIXED! -# - Pauli's are labelled by "Gi", "Gxpi", "Gypi" and "Gzpi". -# - There's no option for randomized prep/meas -# - There's no option for randomly adding +/-pi to the Z rotation angles. -# - There's no option for adding "barriers" -# - There's no test that the 'Gzr' gate has the "correct" convention for a rotation angle -# (a rotation by pi must be a Z gate) or that it's a rotation around Z. -# ***************************************************************** -# """ -# assert(circtype == 'Clifford+Gzr' or circtype == 'Clifford') -# n = circ.width -# d = circ.depth -# if pauli_labels is None: pauli_labels = ['Gi', 'Gxpi', 'Gypi', 'Gzpi'] -# qubits = circ.line_labels -# identity = _np.identity(2 * n, _np.int64) -# zrotname = 'Gzr' -# # qubit_labels = ['G{}'.format(i) for i in range(n)] - -# _, gate_inverse = pspec.compute_one_qubit_gate_relations() -# gate_inverse.update(pspec.compute_multiqubit_inversion_relations()) # add multiQ inverses - -# quasi_inverse_circ = [] -# central_pauli_circ = _cir.Circuit([[_lbl.Label(pauli_labels[_np.random.randint(0, 4)], q) for q in qubits]]) -# #telescoping_pauli = central_pauli_layer.copy() -# # The telescoping Pauli in the symplectic rep. -# telp_s, telp_p = _symp.symplectic_rep_of_clifford_circuit(central_pauli_circ, pspec=pspec) -# assert(_np.sum(_np.abs(telp_s - identity)) <= 1e-8) # Check that it's a Pauli. - -# for d_ind in range(d): -# layer = circ.layer(d - d_ind - 1) -# if layer[0].name == zrotname: -# quasi_inverse_layer = [] -# for gate in layer: - -# q_int = qubits.index(gate.qubits[0]) -# angle = float(gate.args[0]) - -# if telp_p[n + q_int] == 0: rotation_sign = -1. # If the Pauli is Z or I. 
-# else: rotation_sign = +1 # If the Pauli is X or Y. - -# # Sets the quasi inversion angle to + or - the original angle, depending on the Paul -# quasi_inverse_angle = rotation_sign * angle -# # Decides whether to add with to add +/- pi to the rotation angle. -# if _np.random.binomial(1, pluspi_prob) == 1: -# quasi_inverse_angle += _np.pi * (-1)**_np.random.binomial(1, 0.5) -# quasi_inverse_angle = _comp.mod_2pi(quasi_inverse_angle) -# # Updates the telescoping Pauli (in the symplectic rep_, to include this added pi-rotation, -# # as we need to include it as we keep collapsing the circuit down. -# telp_p[q_int] = (telp_p[q_int] + 2) % 4 -# # Constructs the quasi-inverse gate. -# quasi_inverse_gate = _lbl.Label(zrotname, gate.qubits, args=(str(quasi_inverse_angle),)) -# quasi_inverse_layer.append(quasi_inverse_gate) - -# # We don't have to update the telescoping Pauli as it's unchanged, but when we update -# # this it'll need to change. -# #telp_p = telp_p - -# else: -# quasi_inverse_layer = [_lbl.Label(gate_inverse[gate.name], gate.qubits) for gate in layer] -# telp_layer = _symp.find_pauli_layer(telp_p, pauli_labels, qubits) -# conjugation_circ = _cir.Circuit([layer, telp_layer, quasi_inverse_layer]) -# # We calculate what the new telescoping Pauli is, in the symplectic rep. -# telp_s, telp_p = _symp.symplectic_rep_of_clifford_circuit(conjugation_circ, pspec=pspec) - -# # Check that the layer -- pauli -- quasi-inverse circuit implements a Pauli. -# assert(_np.sum(_np.abs(telp_s - identity)) <= 1e-10) -# # Add the quasi inverse layer that we've constructed to the end of the quasi inverse circuit. -# quasi_inverse_circ.append(quasi_inverse_layer) - -# # now that we've completed the quasi inverse circuit we convert it to a Circuit object -# quasi_inverse_circ = _cir.Circuit(quasi_inverse_circ) - -# # Calculate the bit string that this mirror circuit should output, from the final telescoped Pauli. -# target_bitstring = ''.join(['1' if p == 2 else '0' for p in telp_p[n:]]) -# mirror_circuit = circ + central_pauli_circ + quasi_inverse_circ - -# return mirror_circuit, target_bitstring - def create_mirror_circuit(circ, pspec, circ_type='clifford+zxzxz'): """ @@ -313,305 +227,3 @@ def compute_gate_inverse(gate_label): mirror_circuit = _cir.Circuit(mc, line_labels=circ.line_labels) return mirror_circuit, target_bitstring - - -# #generate mirror circuits with pauli frame randomization. no random +pi needed -# #as we construct the quasi-inverse, we generate random pauli layers, and compile them into the unitaries -# #we'll need to recompute the angles needed for the z rotations - -# def create_nc_mirror_circuit(circ, pspec, circtype='Clifford+Gzr'): - -# assert(circtype == 'Clifford+Gzr' or circtype == 'Clifford') -# n = circ.width -# d = circ.depth -# pauli_labels = ['I', 'X', 'Y', 'Z'] -# qubits = circ.line_labels -# identity = _np.identity(2 * n, _np.int64) -# zrotname = 'Gzr' -# # qubit_labels = ['G{}'.format(i) for i in range(n)] - -# _, gate_inverse = pspec.compute_one_qubit_gate_relations() -# gate_inverse.update(pspec.compute_multiqubit_inversion_relations()) # add multiQ inverses -# #for gname in pspec.gate_names: -# # assert(gname in gate_inverse), \ -# # "%s gate does not have an inverse in the gate-set! MRB is not possible!" % gname - -# quasi_inverse_circ = [] - -# Xpi2layer = [_lbl.Label('Gc16', qubits[t]) for t in range(n)] -# c = circ.copy(editable=True) - -# #build the inverse -# d_ind = 0 -# while d_ind 0 the number of circuits generated so far is shown. 
- -# Returns -# ------- -# dict -# A dictionary containing the generated random circuits, the error-free outputs of the circuit, -# and the specification used to generate the circuits. The keys are: - -# - 'circuits'. A dictionary of the sampled circuits. The circuit with key(l,k) is the kth circuit -# at length l. - -# - 'probs'. A dictionary of the error-free *marginalized* probabilities for the "1" outcome of -# a computational basis measurement at the end of each circuit, with the standard input state. -# The ith element of this tuple corresponds to this probability for the qubit on the ith wire of -# the output circuit. - -# - 'qubitordering'. The ordering of the qubits in the 'target' tuples. - -# - 'spec'. A dictionary containing all of the parameters handed to this function, except `pspec`. -# This then specifies how the circuits where generated. -# """ -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['depths'] = depths -# experiment_dict['spec']['circuits_per_length'] = circuits_per_length -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['addlocal'] = addlocal -# experiment_dict['spec']['lsargs'] = lsargs -# experiment_dict['spec']['descriptor'] = descriptor -# experiment_dict['spec']['createdby'] = 'extras.rb.sample.simultaneous_random_circuits_experiment' - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" -# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" 
- -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['probs'] = {} -# experiment_dict['settings'] = {} - -# for lnum, l in enumerate(depths): -# if verbosity > 0: -# print('- Sampling {} circuits at length {} ({} of {} depths)'.format(circuits_per_length, l, -# lnum + 1, len(depths))) -# print(' - Number of circuits sampled = ', end='') -# for j in range(circuits_per_length): -# circuit, idealout = sample_simultaneous_random_circuit(pspec, l, structure=structure, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, -# lsargs=lsargs) - -# if (not set_isolated) and (not setcomplement_isolated): -# experiment_dict['circuits'][l, j] = circuit -# experiment_dict['probs'][l, j] = idealout -# experiment_dict['settings'][l, j] = { -# s: len(depths) + lnum * circuits_per_length + j for s in tuple(structure)} -# else: -# experiment_dict['circuits'][l, j] = {} -# experiment_dict['probs'][l, j] = {} -# experiment_dict['settings'][l, j] = {} -# experiment_dict['circuits'][l, j][tuple(structure)] = circuit -# experiment_dict['probs'][l, j][tuple(structure)] = idealout -# experiment_dict['settings'][l, j][tuple(structure)] = _get_setting(l, j, structure, depths, -# circuits_per_length, structure) -# if set_isolated: -# for subset_ind, subset in enumerate(structure): -# subset_circuit = circuit.copy(editable=True) -# #print(subset) -# for q in circuit.line_labels: -# if q not in subset: -# #print(subset_circuit, q) -# subset_circuit.replace_with_idling_line_inplace(q) -# subset_circuit.done_editing() -# experiment_dict['circuits'][l, j][(tuple(subset),)] = subset_circuit -# experiment_dict['probs'][l, j][(tuple(subset),)] = idealout[subset_ind] -# # setting = {} -# # for s in structure: -# # if s in subset: -# # setting[s] = len(depths) + lnum*circuits_per_length + j -# # else: -# # setting[s] = lnum -# experiment_dict['settings'][l, j][(tuple(subset),)] = _get_setting(l, j, (tuple(subset),), depths, -# circuits_per_length, structure) -# # print(subset) -# # print(_get_setting(l, j, subset, depths, circuits_per_length, structure)) - -# if setcomplement_isolated: -# for subset_ind, subset in enumerate(structure): -# subsetcomplement_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q in subset: -# subsetcomplement_circuit.replace_with_idling_line_inplace(q) -# subsetcomplement_circuit.done_editing() -# subsetcomplement = list(_copy.copy(structure)) -# subsetcomplement_idealout = list(_copy.copy(idealout)) -# del subsetcomplement[subset_ind] -# del subsetcomplement_idealout[subset_ind] -# subsetcomplement = tuple(subsetcomplement) -# subsetcomplement_idealout = tuple(subsetcomplement_idealout) -# experiment_dict['circuits'][l, j][subsetcomplement] = subsetcomplement_circuit -# experiment_dict['probs'][l, j][subsetcomplement] = subsetcomplement_idealout - -# # for s in structure: -# # if s in subsetcomplement: -# # setting[s] = len(depths) + lnum*circuits_per_length + j -# # else: -# # setting[s] = lnum -# experiment_dict['settings'][l, j][subsetcomplement] = _get_setting(l, j, subsetcomplement, depths, -# circuits_per_length, structure) - -# if verbosity > 0: print(j + 1, end=',') -# if verbosity > 0: print('') - -# return experiment_dict - - -# def create_exhaustive_independent_random_circuits_experiment(pspec, allowed_depths, circuits_per_subset, -# structure='1Q', -# sampler='Qelimination', samplerargs=[], descriptor='', -# verbosity=1, seed=None): -# """ -# Todo - -# Parameters -# ---------- -# pspec : 
QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler). - -# allowed_depths : -# - -# circuits_per_subset : -# - -# structure : str or tuple. -# Defines the "structure" of the simultaneous circuit. TODO : more details. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is the -# only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# descriptor : str, optional -# A description of the experiment being generated. Stored in the output dictionary. - -# verbosity : int, optional -# How much output to sent to stdout. - -# seed : int, optional -# Seed for RNG - -# Returns -# ------- -# dict -# """ -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['allowed_depths'] = allowed_depths -# experiment_dict['spec']['circuits_per_subset'] = circuits_per_subset -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['descriptor'] = descriptor - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" -# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" 
- -# rand_state = _np.random.RandomState(seed) # OK if seed is None - -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['probs'] = {} - -# if circuits_per_subset**len(structure) >> 10000: -# print("Warning: {} circuits are going to be generated by this function!".format( -# circuits_per_subset**len(structure))) - -# circuits = {} - -# for ssQs_ind, qubit_labels in enumerate(structure): -# circuits[qubit_labels] = [] -# for i in range(circuits_per_subset): -# l = allowed_depths[rand_state.randint(len(allowed_depths))] -# circuits[qubit_labels].append(create_random_circuit(pspec, l, qubit_labels=qubit_labels, -# sampler=sampler, samplerargs=samplerargs)) - -# experiment_dict['subset_circuits'] = circuits - -# parallel_circuits = {} -# it = [range(circuits_per_subset) for i in range(len(structure))] -# for setting_comb in _itertools.product(*it): -# pcircuit = _cir.Circuit(num_lines=0, editable=True) -# for ssQs_ind, qubit_labels in enumerate(structure): -# pcircuit.tensor_circuit_inplace(circuits[qubit_labels][setting_comb[ssQs_ind]]) -# pcircuit.done_editing() # TIM: is this indented properly? -# parallel_circuits[setting_comb] = pcircuit - -# experiment_dict['circuits'] = parallel_circuits - -# return experiment_dict - def create_direct_rb_circuit(pspec, clifford_compilations, length, qubit_labels=None, sampler='Qelimination', samplerargs=None, addlocal=False, lsargs=None, randomizeout=True, cliffordtwirl=True, @@ -1578,564 +1005,6 @@ def create_direct_rb_circuit(pspec, clifford_compilations, length, qubit_labels= return outcircuit, idealout -#### Commented out as all of this functionality should be reproducable using simulataneous experiment designs applied -#### to DirectRB experiment designs. -# def sample_simultaneous_direct_rb_circuit(pspec, clifford_compilations, length, structure='1Q', -# sampler='Qelimination', -# samplerargs=[], addlocal=False, lsargs=[], randomizeout=True, -# cliffordtwirl=True, conditionaltwirl=True, citerations=20, compilerargs=[], -# partitioned=False, seed=1234): -# """ -# Generates a simultaneous "direct randomized benchmarking" (DRB) circuit. - -# DRB is the protocol introduced in arXiv:1807.07975 (2018). An n-qubit DRB circuit consists of -# (1) a circuit the prepares a uniformly random stabilizer state; (2) a length-l circuit -# (specified by `length`) consisting of circuit layers sampled according to some user-specified -# distribution (specified by `sampler`), (3) a circuit that maps the output of the preceeding -# circuit to a computational basis state. See arXiv:1807.07975 (2018) for further details. Todo : -# what SDRB is. - -# Parameters -# ---------- -# pspec : QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned DRB circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler for the "core" of the DRB circuit). Unless -# `qubit_labels` is not None, the circuit is sampled over all the qubits in `pspec`. - -# clifford_compilations : dict -# A dictionary with the potential keys `'absolute'` and `'paulieq'` and corresponding -# :class:`CompilationRules` values. These compilation rules specify how to compile the -# "native" gates of `pspec` into Clifford gates. 
- -# length : int -# The "direct RB length" of the circuit, which is closely related to the circuit depth. It -# must be an integer >= 0. Unless `addlocal` is True, it is the depth of the "core" random -# circuit, sampled according to `sampler`, specified in step (2) above. If `addlocal` is True, -# each layer in the "core" circuit sampled according to "sampler` is followed by a layer of -# 1-qubit gates, with sampling specified by `lsargs` (and the first layer is proceeded by a -# layer of 1-qubit gates), and so the circuit of step (2) is length 2*`length` + 1. - -# structure : str or tuple, optional -# todo. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not -# a valid form of sampling for n-qubit DRB, but is not explicitly forbidden in this function]. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is -# the only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# addlocal : bool, optional -# Whether to follow each layer in the "core" circuit, sampled according to `sampler` with -# a layer of 1-qubit gates. - -# lsargs : list, optional -# Only used if addlocal is True. A list of optional arguments handed to the 1Q gate -# layer sampler circuit_layer_by_oneQgate(). Specifies how to sample 1Q-gate layers. - -# randomizeout : bool, optional -# If False, the ideal output of the circuit (the "success" or "survival" outcome) is the all-zeros -# bit string. If True, the ideal output of the circuit is randomized to a uniformly random bit-string. -# This setting is useful for, e.g., detecting leakage/loss/measurement-bias etc. - -# cliffordtwirl : bool, optional -# Wether to begin the circuit with a sequence that generates a random stabilizer state. For -# standard DRB this should be set to True. There are a variety of reasons why it is better -# to have this set to True. - -# conditionaltwirl : bool, optional -# DRB only requires that the initial/final sequences of step (1) and (3) create/measure -# a uniformly random / particular stabilizer state, rather than implement a particular unitary. -# step (1) and (3) can be achieved by implementing a uniformly random Clifford gate and the -# unique inversion Clifford, respectively. This is implemented if `conditionaltwirl` is False. 
-# However, steps (1) and (3) can be implemented much more efficiently than this: the sequences -# of (1) and (3) only need to map a particular input state to a particular output state, -# if `conditionaltwirl` is True this more efficient option is chosen -- this is option corresponds -# to "standard" DRB. (the term "conditional" refers to the fact that in this case we essentially -# implementing a particular Clifford conditional on a known input). - -# citerations : int, optional -# Some of the stabilizer state / Clifford compilation algorithms in pyGSTi (including the default -# algorithms) are randomized, and the lowest-cost circuit is chosen from all the circuit generated -# in the iterations of the algorithm. This is the number of iterations used. The time required to -# generate a DRB circuit is linear in `citerations`. Lower-depth / lower 2-qubit gate count -# compilations of steps (1) and (3) are important in order to successfully implement DRB on as many -# qubits as possible. - -# compilerargs : list, optional -# A list of arguments that are handed to the compile_stabilier_state/measurement()functions (or the -# compile_clifford() function if `conditionaltwirl `is False). This includes all the optional -# arguments of these functions *after* the `iterations` option (set by `citerations`). For most -# purposes the default options will be suitable (or at least near-optimal from the compilation methods -# in-built into pyGSTi). See the docstrings of these functions for more information. - -# partitioned : bool, optional -# If False, a single circuit is returned consisting of the full circuit. If True, three circuits -# are returned in a list consisting of: (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). - -# seed: int, optional -# Seed for RNG - -# Returns -# ------- -# Circuit or list of Circuits -# If partioned is False, a random DRB circuit sampled as specified. If partioned is True, a list of -# three circuits consisting of (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1) [except in the case of cliffordtwirl=False, when it is a list of two circuits]. -# Tuple -# A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the -# circuit. Always all zeros if `randomizeout` is False. The ith element of the tuple -# corresponds to the error-free outcome for the qubit labelled by: the ith element of -# `qubit_labels`, if `qubit_labels` is not None; the ith element of `pspec.qubit_labels`, otherwise. -# In both cases, the ith element of the tuple corresponds to the error-free outcome for the -# qubit on the ith wire of the output circuit. -# """ -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# n = pspec.num_qubits -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple) -# ), "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" 
-# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used) -# ), "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels)) -# ), "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" -# n = len(qubits_used) - -# for qubit_labels in structure: -# subgraph = pspec.qubit_graph.subgraph(list(qubit_labels)) # or pspec.compute_clifford_2Q_connectivity? -# assert(subgraph.is_connected_graph()), "Each subset of qubits in `structure` must be connected!" - -# rand_state = _np.random.RandomState(seed) # OK if seed is None - -# # Creates a empty circuit over no wires -# circuit = _cir.Circuit(num_lines=0, editable=True) - -# s_rc_dict = {} -# p_rc_dict = {} -# circuit_dict = {} - -# for qubit_labels in structure: -# qubit_labels = tuple(qubit_labels) -# # Sample a random circuit of "native gates" over this set of qubits, with the -# # specified sampling. -# subset_circuit = create_random_circuit(pspec=pspec, length=length, qubit_labels=qubit_labels, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, lsargs=lsargs, -# rand_state=rand_state) -# circuit_dict[qubit_labels] = subset_circuit -# # find the symplectic matrix / phase vector this circuit implements. -# s_rc_dict[qubit_labels], p_rc_dict[qubit_labels] = _symp.symplectic_rep_of_clifford_circuit( -# subset_circuit, pspec=pspec) -# # Tensors this circuit with the current circuit -# circuit.tensor_circuit_inplace(subset_circuit) - -# # Creates empty circuits over no wires -# inversion_circuit = _cir.Circuit(num_lines=0, editable=True) -# if cliffordtwirl: -# initial_circuit = _cir.Circuit(num_lines=0, editable=True) - -# for qubit_labels in structure: -# qubit_labels = tuple(qubit_labels) -# subset_n = len(qubit_labels) -# # If we are clifford twirling, we do an initial random circuit that is either a uniformly random -# # cliffor or creates a uniformly random stabilizer state from the standard input. -# if cliffordtwirl: - -# # Sample a uniformly random Clifford. -# s_initial, p_initial = _symp.random_clifford(subset_n, rand_state=rand_state) -# # Find the composite action of this uniformly random clifford and the random circuit. -# s_composite, p_composite = _symp.compose_cliffords(s_initial, p_initial, s_rc_dict[qubit_labels], -# p_rc_dict[qubit_labels]) - -# # If conditionaltwirl we do a stabilizer prep (a conditional Clifford). -# if conditionaltwirl: -# subset_initial_circuit = _cmpl.compile_stabilizer_state(s_initial, p_initial, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, -# citerations, *compilerargs, -# rand_state=rand_state) -# # If not conditionaltwirl, we do a full random Clifford. -# else: -# subset_initial_circuit = _cmpl.compile_clifford(s_initial, p_initial, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, -# *compilerargs, rand_state=rand_state) - -# initial_circuit.tensor_circuit_inplace(subset_initial_circuit) - -# # If we are not Clifford twirling, we just copy the effect of the random circuit as the effect -# # of the "composite" prep + random circuit (as here the prep circuit is the null circuit). 
-# else: -# s_composite = _copy.deepcopy(s_rc_dict[qubit_labels]) -# p_composite = _copy.deepcopy(p_rc_dict[qubit_labels]) - -# if conditionaltwirl: -# # If we want to randomize the expected output then randomize the p vector, otherwise -# # it is left as p. Note that, unlike with compile_clifford, we don't invert (s,p) -# # before handing it to the stabilizer measurement function. -# if randomizeout: p_for_measurement = _symp.random_phase_vector(s_composite, subset_n, -# rand_state=rand_state) -# else: p_for_measurement = p_composite -# subset_inversion_circuit = _cmpl.compile_stabilizer_measurement( -# s_composite, p_for_measurement, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, *compilerargs, -# rand_state=rand_state) -# else: -# # Find the Clifford that inverts the circuit so far. We -# s_inverse, p_inverse = _symp.inverse_clifford(s_composite, p_composite) -# # If we want to randomize the expected output then randomize the p_inverse vector, otherwise -# # do not. -# if randomizeout: p_for_inversion = _symp.random_phase_vector(s_inverse, subset_n, rand_state=rand_state) -# else: p_for_inversion = p_inverse -# # Compile the Clifford. -# subset_inversion_circuit = _cmpl.compile_clifford(s_inverse, p_for_inversion, pspec, -# clifford_compilations.get('absolute', None), -# clifford_compilations.get('paulieq', None), -# qubit_labels, citerations, *compilerargs, -# rand_state=rand_state) - -# inversion_circuit.tensor_circuit_inplace(subset_inversion_circuit) - -# inversion_circuit.done_editing() - -# if cliffordtwirl: -# full_circuit = initial_circuit.copy(editable=True) -# full_circuit.append_circuit_inplace(circuit) -# full_circuit.append_circuit_inplace(inversion_circuit) -# else: -# full_circuit = _copy.deepcopy(circuit) -# full_circuit.append_circuit_inplace(inversion_circuit) - -# full_circuit.done_editing() - -# # Find the expected outcome of the circuit. -# s_out, p_out = _symp.symplectic_rep_of_clifford_circuit(full_circuit, pspec=pspec) -# if conditionaltwirl: # s_out is not always the identity with a conditional twirl, -# # only conditional on prep/measure. -# assert(_np.array_equal(s_out[:n, n:], _np.zeros((n, n), _np.int64))), "Compiler has failed!" -# else: assert(_np.array_equal(s_out, _np.identity(2 * n, _np.int64))), "Compiler has failed!" - -# # Find the ideal output of the circuit. -# s_inputstate, p_inputstate = _symp.prep_stabilizer_state(n, zvals=None) -# s_outstate, p_outstate = _symp.apply_clifford_to_stabilizer_state(s_out, p_out, s_inputstate, p_inputstate) -# idealout = [] -# for qubit_labels in structure: -# subset_idealout = [] -# for q in qubit_labels: -# qind = circuit.line_labels.index(q) -# measurement_out = _symp.pauli_z_measurement(s_outstate, p_outstate, qind) -# bit = measurement_out[1] -# assert(bit == 0 or bit == 1), "Ideal output is not a computational basis state!" -# if not randomizeout: -# assert(bit == 0), "Ideal output is not the all 0s computational basis state!" 
-# subset_idealout.append(int(bit)) -# idealout.append(tuple(subset_idealout)) -# idealout = tuple(idealout) - -# if not partitioned: outcircuit = full_circuit -# else: -# if cliffordtwirl: outcircuit = [initial_circuit, circuit, inversion_circuit] -# else: outcircuit = [circuit, inversion_circuit] - -# return outcircuit, idealout - - -# def create_simultaneous_direct_rb_experiment(pspec, depths, circuits_per_length, structure='1Q', -# sampler='Qelimination', -# samplerargs=[], addlocal=False, lsargs=[], randomizeout=False, -# cliffordtwirl=True, conditionaltwirl=True, citerations=20, -# compilerargs=[], -# partitioned=False, set_isolated=True, setcomplement_isolated=False, -# descriptor='A set of simultaneous DRB experiments', verbosity=1, -# seed=1234): -# """ -# Generates a simultaneous "direct randomized benchmarking" (DRB) experiments (circuits). - -# DRB is the protocol introduced in arXiv:1807.07975 (2018). -# An n-qubit DRB circuit consists of (1) a circuit the prepares a uniformly random stabilizer state; -# (2) a length-l circuit (specified by `length`) consisting of circuit layers sampled according to -# some user-specified distribution (specified by `sampler`), (3) a circuit that maps the output of -# the preceeding circuit to a computational basis state. See arXiv:1807.07975 (2018) for further -# details. In simultaneous DRB ...... . - -# Parameters -# ---------- -# pspec : QubitProcessorSpec -# The QubitProcessorSpec for the device that the circuit is being sampled for, which defines the -# "native" gate-set and the connectivity of the device. The returned DRB circuit will be over -# the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec` -# is always handed to the sampler, as the first argument of the sampler function (this is only -# of importance when not using an in-built sampler for the "core" of the DRB circuit). Unless -# `qubit_labels` is not None, the circuit is sampled over all the qubits in `pspec`. - -# depths : int -# The set of "direct RB depths" for the circuits. The DRB depths must be integers >= 0. -# Unless `addlocal` is True, the DRB length is the depth of the "core" random circuit, -# sampled according to `sampler`, specified in step (2) above. If `addlocal` is True, -# each layer in the "core" circuit sampled according to "sampler` is followed by a layer of -# 1-qubit gates, with sampling specified by `lsargs` (and the first layer is proceeded by a -# layer of 1-qubit gates), and so the circuit of step (2) is length 2*`length` + 1. - -# circuits_per_length : int -# The number of (possibly) different DRB circuits sampled at each length. - -# structure : str or tuple. -# Defines the "structure" of the simultaneous DRB experiment. TODO : more details. - -# sampler : str or function, optional -# If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. -# Except for 'local', this corresponds to sampling layers according to the sampling function -# in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this -# corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not -# a valid form of sampling for n-qubit DRB, but is not explicitly forbidden in this function]. -# If `sampler` is a function, it should be a function that takes as the first argument a -# QubitProcessorSpec, and returns a random circuit layer as a list of gate Label objects. 
Note that -# the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is the -# only sampler that requires no parameters beyond the QubitProcessorSpec *and* works for arbitrary -# connectivity devices. See the docstrings for each of these samplers for more information. - -# samplerargs : list, optional -# A list of arguments that are handed to the sampler function, specified by `sampler`. -# The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`, -# and `samplerargs` lists the remaining arguments handed to the sampler. This is not -# optional for some choices of `sampler`. - -# addlocal : bool, optional -# Whether to follow each layer in the "core" circuits, sampled according to `sampler` with -# a layer of 1-qubit gates. - -# lsargs : list, optional -# Only used if addlocal is True. A list of optional arguments handed to the 1Q gate -# layer sampler circuit_layer_by_oneQgate(). Specifies how to sample 1Q-gate layers. - -# randomizeout : bool, optional -# If False, the ideal output of the circuits (the "success" or "survival" outcome) is the all-zeros -# bit string. If True, the ideal output of each circuit is randomized to a uniformly random bit-string. -# This setting is useful for, e.g., detecting leakage/loss/measurement-bias etc. - -# cliffordtwirl : bool, optional -# Wether to begin the circuit with a sequence that generates a random stabilizer state. For -# standard DRB this should be set to True. There are a variety of reasons why it is better -# to have this set to True. - -# conditionaltwirl : bool, optional -# DRB only requires that the initial/final sequences of step (1) and (3) create/measure -# a uniformly random / particular stabilizer state, rather than implement a particular unitary. -# step (1) and (3) can be achieved by implementing a uniformly random Clifford gate and the -# unique inversion Clifford, respectively. This is implemented if `conditionaltwirl` is False. -# However, steps (1) and (3) can be implemented much more efficiently than this: the sequences -# of (1) and (3) only need to map a particular input state to a particular output state, -# if `conditionaltwirl` is True this more efficient option is chosen -- this is option corresponds -# to "standard" DRB. (the term "conditional" refers to the fact that in this case we essentially -# implementing a particular Clifford conditional on a known input). - -# citerations : int, optional -# Some of the stabilizer state / Clifford compilation algorithms in pyGSTi (including the default -# algorithms) are randomized, and the lowest-cost circuit is chosen from all the circuits generated -# in the iterations of the algorithm. This is the number of iterations used. The time required to -# generate a DRB circuit is linear in `citerations`. Lower-depth / lower 2-qubit gate count -# compilations of steps (1) and (3) are important in order to successfully implement DRB on as many -# qubits as possible. - -# compilerargs : list, optional -# A list of arguments that are handed to the compile_stabilier_state/measurement()functions (or the -# compile_clifford() function if `conditionaltwirl `is False). This includes all the optional -# arguments of these functions *after* the `iterations` option (set by `citerations`). For most -# purposes the default options will be suitable (or at least near-optimal from the compilation methods -# in-built into pyGSTi). See the docstrings of these functions for more information. 
- -# partitioned : bool, optional -# If False, each circuit is returned as a single full circuit. If True, each circuit is returned as -# a list of three circuits consisting of: (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). - -# set_isolated : bool, optional -# Todo - -# setcomplement_isolated : bool, optional -# Todo - -# descriptor : str, optional -# A description of the experiment being generated. Stored in the output dictionary. - -# verbosity : int, optional -# If > 0 the number of circuits generated so far is shown. - -# seed: int, optional -# Seed for RNG - -# Returns -# ------- -# Circuit or list of Circuits -# If partioned is False, a random DRB circuit sampled as specified. If partioned is True, a list of -# three circuits consisting of (1) the stabilizer-prep circuit, (2) the core random circuit, -# (3) the pre-measurement circuit. In that case the full circuit is obtained by appended (2) to (1) -# and then (3) to (1). -# Tuple -# A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the -# circuit. Always all zeros if `randomizeout` is False. The ith element of the tuple -# corresponds to the error-free outcome for the qubit labelled by: the ith element of -# `qubit_labels`, if `qubit_labels` is not None; the ith element of `pspec.qubit_labels`, otherwise. -# In both cases, the ith element of the tuple corresponds to the error-free outcome for the -# qubit on the ith wire of the output circuit. -# dict -# A dictionary containing the generated RB circuits, the error-free outputs of the circuit, -# and the specification used to generate the circuits. The keys are: - -# - 'circuits'. A dictionary of the sampled circuits. The circuit with key(l,k) is the kth circuit -# at DRB length l. - -# - 'idealout'. A dictionary of the error-free outputs of the circuits as tuples. The tuple with -# key(l,k) is the error-free output of the (l,k) circuit. The ith element of this tuple corresponds -# to the error-free outcome for the qubit on the ith wire of the output circuit and/or the ith element -# of the list at the key 'qubitordering'. These tuples will all be (0,0,0,...) when `randomizeout` is -# False - -# - 'qubitordering'. The ordering of the qubits in the 'idealout' tuples. - -# - 'spec'. A dictionary containing all of the parameters handed to this function, except `pspec`. -# This then specifies how the circuits where generated. 
-# """ - -# experiment_dict = {} -# experiment_dict['spec'] = {} -# experiment_dict['spec']['depths'] = depths -# experiment_dict['spec']['circuits_per_length'] = circuits_per_length -# experiment_dict['spec']['sampler'] = sampler -# experiment_dict['spec']['samplerargs'] = samplerargs -# experiment_dict['spec']['addlocal'] = addlocal -# experiment_dict['spec']['lsargs'] = lsargs -# experiment_dict['spec']['randomizeout'] = randomizeout -# experiment_dict['spec']['cliffordtwirl'] = cliffordtwirl -# experiment_dict['spec']['conditionaltwirl'] = conditionaltwirl -# experiment_dict['spec']['citerations'] = citerations -# experiment_dict['spec']['compilerargs'] = compilerargs -# experiment_dict['spec']['partitioned'] = partitioned -# experiment_dict['spec']['descriptor'] = descriptor -# experiment_dict['spec']['createdby'] = 'extras.rb.sample.simultaneous_direct_rb_experiment' - -# #rand_state = _np.random.RandomState(seed) # OK if seed is None - -# if isinstance(structure, str): -# assert(structure == '1Q'), "The only default `structure` option is the string '1Q'" -# structure = tuple([(q,) for q in pspec.qubit_labels]) -# else: -# assert(isinstance(structure, list) or isinstance(structure, tuple)), \ -# "If not a string, `structure` must be a list or tuple." -# qubits_used = [] -# for qubit_labels in structure: -# assert(isinstance(qubit_labels, list) or isinstance( -# qubit_labels, tuple)), "SubsetQs must be a list or a tuple!" -# qubits_used = qubits_used + list(qubit_labels) -# assert(len(set(qubits_used)) == len(qubits_used)), \ -# "The qubits in the tuples/lists of `structure must all be unique!" - -# assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \ -# "The qubits to benchmark must all be in the QubitProcessorSpec `pspec`!" - -# experiment_dict['spec']['structure'] = structure -# experiment_dict['circuits'] = {} -# experiment_dict['target'] = {} -# experiment_dict['settings'] = {} - -# for qubit_labels in structure: -# subgraph = pspec.qubit_graph.subgraph(list(qubit_labels)) # or pspec.compute_clifford_2Q_connectivity? -# assert(subgraph.is_connected_graph()), "Each subset of qubits in `structure` must be connected!" 
- -# for lnum, l in enumerate(depths): -# lseed = seed + lnum * circuits_per_length -# if verbosity > 0: -# print('- Sampling {} circuits at DRB length {} ({} of {} depths) with seed {}'.format(circuits_per_length, -# l, lnum + 1, -# len(depths), lseed)) -# print(' - Number of circuits sampled = ', end='') -# for j in range(circuits_per_length): -# circuit, idealout = sample_simultaneous_direct_rb_circuit(pspec, l, structure=structure, sampler=sampler, -# samplerargs=samplerargs, addlocal=addlocal, -# lsargs=lsargs, randomizeout=randomizeout, -# cliffordtwirl=cliffordtwirl, -# conditionaltwirl=conditionaltwirl, -# citerations=citerations, -# compilerargs=compilerargs, -# partitioned=partitioned, -# seed=lseed + j) - -# if (not set_isolated) and (not setcomplement_isolated): -# experiment_dict['circuits'][l, j] = circuit -# experiment_dict['target'][l, j] = idealout - -# else: -# experiment_dict['circuits'][l, j] = {} -# experiment_dict['target'][l, j] = {} -# experiment_dict['settings'][l, j] = {} -# experiment_dict['circuits'][l, j][tuple(structure)] = circuit -# experiment_dict['target'][l, j][tuple(structure)] = idealout -# experiment_dict['settings'][l, j][tuple(structure)] = _get_setting(l, j, structure, depths, -# circuits_per_length, structure) - -# if set_isolated: -# for subset_ind, subset in enumerate(structure): -# subset_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q not in subset: -# subset_circuit.replace_with_idling_line_inplace(q) -# subset_circuit.done_editing() -# experiment_dict['circuits'][l, j][(tuple(subset),)] = subset_circuit -# experiment_dict['target'][l, j][(tuple(subset),)] = (idealout[subset_ind],) -# experiment_dict['settings'][l, j][(tuple(subset),)] = _get_setting(l, j, (tuple(subset),), depths, -# circuits_per_length, structure) - -# if setcomplement_isolated: -# for subset_ind, subset in enumerate(structure): -# subsetcomplement_circuit = circuit.copy(editable=True) -# for q in circuit.line_labels: -# if q in subset: -# subsetcomplement_circuit.replace_with_idling_line_inplace(q) -# subsetcomplement_circuit.done_editing() -# subsetcomplement = list(_copy.copy(structure)) -# subsetcomplement_idealout = list(_copy.copy(idealout)) -# del subsetcomplement[subset_ind] -# del subsetcomplement_idealout[subset_ind] -# subsetcomplement = tuple(subsetcomplement) -# subsetcomplement_idealout = tuple(subsetcomplement_idealout) -# experiment_dict['circuits'][l, j][subsetcomplement] = subsetcomplement_circuit -# experiment_dict['target'][l, j][subsetcomplement] = subsetcomplement_idealout -# experiment_dict['settings'][l, j][subsetcomplement] = _get_setting(l, j, subsetcomplement, depths, -# circuits_per_length, structure) - -# if verbosity > 0: print(j + 1, end=',') -# if verbosity > 0: print('') - -# return experiment_dict def _sample_clifford_circuit(pspec, clifford_compilations, qubit_labels, citerations, compilerargs, exact_compilation_key, srep_cache, rand_state): diff --git a/pygsti/algorithms/rbfit.py b/pygsti/algorithms/rbfit.py index ba54b3706..167157c73 100644 --- a/pygsti/algorithms/rbfit.py +++ b/pygsti/algorithms/rbfit.py @@ -17,132 +17,6 @@ from pygsti.tools import rbtools as _rbt -# Obsolute function to be deleted. -# def std_practice_analysis(RBSdataset, seed=[0.8, 0.95], bootstrap_samples=200, asymptote='std', rtype='EI', -# datatype='auto'): -# """ -# Implements a "standard practice" analysis of RB data. 
Fits the average success probabilities to the exponential -# decay A + Bp^m, using least-squares fitting, with (1) A fixed (as standard, to 1/2^n where n is the number of -# qubits the data is for), and (2) A, B and p all allowed to varying. Confidence intervals are also estimated using -# a standard non-parameteric boostrap. - -# Parameters -# ---------- -# RBSdataset : RBSummaryDataset -# An RBSUmmaryDataset containing the data to analyze - -# seed : list, optional -# Seeds for the fit of B and p (A is seeded to the asymptote defined by `asympote`). - -# bootstrap_samples : int, optional -# The number of samples in the bootstrap. - -# asymptote : str or float, optional -# The A value for the fitting to A + Bp^m with A fixed. If a string must be 'std', in -# in which case A is fixed to 1/2^n. - -# rtype : {'EI','AGI'}, optional -# The RB error rate rescaling convention. 'EI' results in RB error rates that are associated -# with the entanglement infidelity, which is the error probability with stochastic errors (and -# is equal to the diamond distance). 'AGI' results in RB error rates that are associated with -# average gate infidelity. - -# Returns -# ------- -# RBResults -# An object encapsulating the RB results (and data). - -# """ -# assert(datatype == 'raw' or datatype == 'adjusted' or datatype == 'auto'), "Unknown data type!" - -# if datatype == 'auto': -# if RBSdataset.datatype == 'hamming_distance_counts': -# datatype = 'adjusted' -# else: -# datatype = 'raw' - -# lengths = RBSdataset.lengths -# n = RBSdataset.num_qubits - -# if isinstance(asymptote, str): -# assert(asymptote == 'std'), "If `asympotote` is a string it must be 'std'!" -# if datatype == 'raw': -# asymptote = 1 / 2**n -# elif datatype == 'adjusted': -# asymptote = 1 / 4**n - -# if datatype == 'adjusted': -# ASPs = RBSdataset.adjusted_ASPs -# if datatype == 'raw': -# ASPs = RBSdataset.ASPs - -# FF_results, FAF_results = std_least_squares_fit(lengths, ASPs, n, seed=seed, asymptote=asymptote, -# ftype='full+FA', rtype=rtype) - -# parameters = ['A', 'B', 'p', 'r'] -# bootstraps_FF = {} -# bootstraps_FAF = {} - -# if bootstrap_samples > 0: - -# bootstraps_FF = {p: [] for p in parameters} -# bootstraps_FAF = {p: [] for p in parameters} -# failcount_FF = 0 -# failcount_FAF = 0 - -# # Add bootstrapped data, if neccessary. 
-# RBSdataset.add_bootstrapped_datasets(samples=bootstrap_samples) - -# for i in range(bootstrap_samples): - -# if datatype == 'adjusted': -# BS_ASPs = RBSdataset.bootstraps[i].adjusted_ASPs -# if datatype == 'raw': -# BS_ASPs = RBSdataset.bootstraps[i].ASPs - -# BS_FF_results, BS_FAF_results = std_least_squares_fit(lengths, BS_ASPs, n, seed=seed, -# asymptote=asymptote, ftype='full+FA', -# rtype=rtype) - -# if BS_FF_results['success']: -# for p in parameters: -# bootstraps_FF[p].append(BS_FF_results['estimates'][p]) -# else: -# failcount_FF += 1 -# if BS_FAF_results['success']: -# for p in parameters: -# bootstraps_FAF[p].append(BS_FAF_results['estimates'][p]) -# else: -# failcount_FAF += 1 - -# failrate_FF = failcount_FF / bootstrap_samples -# failrate_FAF = failcount_FAF / bootstrap_samples - -# std_FF = {p: _np.std(_np.array(bootstraps_FF[p])) for p in parameters} -# std_FAF = {p: _np.std(_np.array(bootstraps_FAF[p])) for p in parameters} - -# else: -# bootstraps_FF = None -# std_FF = None -# failrate_FF = None -# bootstraps_FAF = None -# std_FAF = None -# failrate_FAF = None - -# fits = {} -# fits['full'] = FitResults('LS', FF_results['seed'], rtype, FF_results['success'], FF_results['estimates'], -# FF_results['variable'], stds=std_FF, bootstraps=bootstraps_FF, -# bootstraps_failrate=failrate_FF) - -# fits['A-fixed'] = FitResults('LS', FAF_results['seed'], rtype, FAF_results['success'], -# FAF_results['estimates'], FAF_results['variable'], stds=std_FAF, -# bootstraps=bootstraps_FAF, bootstraps_failrate=failrate_FAF) - -# results = SimpleRBResults(RBSdataset, rtype, fits) - -# return results - - def std_least_squares_fit(lengths, asps, n, seed=None, asymptote=None, ftype='full', rtype='EI'): """ Implements a "standard" least-squares fit of RB data. @@ -469,111 +343,3 @@ def _from_nice_serialization(cls, state): return cls(state['fit_type'], state['seed'], state['r_type'], state['success'], state['estimates'], state['variable'], state['stds'], state['bootstraps'], state['bootstraps_failrate']) - -# Obsolute RB results class -# class SimpleRBResults(object): -# """ -# An object to contain the results of an RB analysis. - -# """ - -# def __init__(self, data, rtype, fits): -# """ -# Initialize an RBResults object. - -# Parameters -# ---------- -# data : RBSummaryDataset -# The RB summary data that the analysis was performed for. - -# rtype : {'IE','AGI'} -# The type of RB error rate, corresponding to different dimension-dependent -# re-scalings of (1-p), where p is the RB decay constant in A + B*p^m. - -# fits : dict -# A dictionary containing FitResults objects, obtained from one or more -# fits of the data (e.g., a fit with all A, B and p as free parameters and -# a fit with A fixed to 1/2^n). -# """ -# self.data = data -# self.rtype = rtype -# self.fits = fits - -# def plot(self, fitkey=None, decay=True, success_probabilities=True, size=(8, 5), ylim=None, xlim=None, -# legend=True, title=None, figpath=None): -# """ -# Plots RB data and, optionally, a fitted exponential decay. - -# Parameters -# ---------- -# fitkey : dict key, optional -# The key of the self.fits dictionary to plot the fit for. If None, will -# look for a 'full' key (the key for a full fit to A + Bp^m if the standard -# analysis functions are used) and plot this if possible. It otherwise checks -# that there is only one key in the dict and defaults to this. If there are -# multiple keys and none of them are 'full', `fitkey` must be specified when -# `decay` is True. 
- -# decay : bool, optional -# Whether to plot a fit, or just the data. - -# success_probabilities : bool, optional -# Whether to plot the success probabilities distribution, as a violin plot. (as well -# as the *average* success probabilities at each length). - -# size : tuple, optional -# The figure size - -# ylim, xlim : tuple, optional -# The x and y limits for the figure. - -# legend : bool, optional -# Whether to show a legend. - -# title : str, optional -# A title to put on the figure. - -# figpath : str, optional -# If specified, the figure is saved with this filename. -# """ - -# # Future : change to a plotly plot. -# try: import matplotlib.pyplot as _plt -# except ImportError: raise ValueError("This function requires you to install matplotlib!") - -# if decay and fitkey is None: -# allfitkeys = list(self.fits.keys()) -# if 'full' in allfitkeys: fitkey = 'full' -# else: -# assert(len(allfitkeys) == 1), \ -# "There are multiple fits and none have the key 'full'. Please specify the fit to plot!" -# fitkey = allfitkeys[0] - -# _plt.figure(figsize=size) -# _plt.plot(self.data.lengths, self.data.ASPs, 'o', label='Average success probabilities') - -# if decay: -# lengths = _np.linspace(0, max(self.data.lengths), 200) -# A = self.fits[fitkey].estimates['A'] -# B = self.fits[fitkey].estimates['B'] -# p = self.fits[fitkey].estimates['p'] -# _plt.plot(lengths, A + B * p**lengths, -# label='Fit, r = {:.2} +/- {:.1}'.format(self.fits[fitkey].estimates['r'], -# self.fits[fitkey].stds['r'])) - -# if success_probabilities: -# _plt.violinplot(list(self.data.success_probabilities), self.data.lengths, points=10, widths=1., -# showmeans=False, showextrema=False, showmedians=False) # , label='Success probabilities') - -# if title is not None: _plt.title(title) -# _plt.ylabel("Success probability") -# _plt.xlabel("RB sequence length $(m)$") -# _plt.ylim(ylim) -# _plt.xlim(xlim) - -# if legend: _plt.legend() - -# if figpath is not None: _plt.savefig(figpath, dpi=1000) -# else: _plt.show() - -# return diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 8f254198e..d3090405b 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -79,11 +79,6 @@ def label_index(self, label, ok_if_missing=False): return None return self._label_indices[label] - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - def create_subbasis(self, must_overlap_with_these_sslbls): """ Create a sub-basis of this basis by including only the elements @@ -491,11 +486,6 @@ def label_index(self, elemgen_label, ok_if_missing=False): return base + indices[elemgen_label] - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): """ Create a sub-basis of this basis by including only the elements @@ -524,125 +514,3 @@ def intersection(self, other_basis): def difference(self, other_basis): return self.to_explicit_basis().difference(other_basis) - - -#OLD - maybe not needed? -#class LowWeightElementaryErrorgenBasis(ElementaryErrorgenBasis): -# """ -# Spanned by the elementary error generators of given type(s) (e.g. "Hamiltonian" and/or "other") -# and with elements corresponding to a `Basis`, usually of Paulis. 
-# """ -# -# def __init__(self, basis_1q, state_space, other_mode, max_ham_weight=None, max_other_weight=None, -# must_overlap_with_these_sslbls=None): -# self._basis_1q = basis_1q -# self._other_mode = other_mode -# self.state_space = state_space -# self._max_ham_weight = max_ham_weight -# self._max_other_weight = max_other_weight -# self._must_overlap_with_these_sslbls = must_overlap_with_these_sslbls -# -# assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" -# sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels -# self.sslbls = sslbls # the "support" of this space - the qubit labels -# -# self._cached_label_indices = None -# self._cached_labels_by_support = None -# self._cached_elements = None -# -# #Needed? -# # self.dim = len(self.labels) # TODO - update this so we don't always need to build labels -# # # (this defeats lazy building via property below) - we can just compute this, especially if -# # # not too fancy -# -# @property -# def labels(self): -# if self._cached_label_indices is None: -# -# def _basis_el_strs(possible_bels, wt): -# for els in _itertools.product(*([possible_bels] * wt)): -# yield ''.join(els) -# -# labels = {} -# all_bels = self.basis_1q.labels[1:] # assume first element is identity -# nontrivial_bels = self.basis_1q.labels[1:] # assume first element is identity -# -# max_weight = self._max_ham_weight if (self._max_ham_weight is not None) else len(self.sslbls) -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] # always True? -# labels[support].extend([('H', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# -# max_weight = self._max_other_weight if (self._max_other_weight is not None) else len(self.sslbls) -# if self._other_mode != "all": -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] -# labels[support].extend([('S', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# else: -# #This is messy code that relies on basis labels being single characters -- TODO improve(?) -# idle_char = self.basis_1q.labels[1:] # assume first element is identity -# assert(len(idle_char) == 1 and all([len(c) == 1 for c in nontrivial_bels])), \ -# "All basis el labels must be single chars for other_mode=='all'!" 
-# for support in _itertools.combinations(self.sslbls, max_weight): -# # Loop over all possible basis elements for this max-weight support, computing the actual support -# # of each one individually and appending it to the appropriate list -# for bel1 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices1 = [i for i in range(max_weight) if bel1[i] != idle_char] -# for bel2 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices2 = [i for i in range(max_weight) if bel2[i] != idle_char] -# nonidle_indices = list(sorted(set(nonidle_indices1) + set(nonidle_indices2))) -# bel1 = ''.join([bel1[i] for i in nonidle_indices]) # trim to actual support -# bel2 = ''.join([bel2[i] for i in nonidle_indices]) # trim to actual support -# actual_support = tuple([support[i] for i in nonidle_indices]) -# -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(actual_support)) == 0): -# continue -# -# if actual_support not in labels: labels[actual_support] = [] -# labels[actual_support].append(('S', bel1, bel2)) -# -# self._cached_labels_by_support = labels -# self._cached_label_indices = _collections.OrderedDict(((support_lbl, i) for i, support_lbl in enumerate( -# ((support, lbl) for support, lst in labels.items() for lbl in lst)))) -# -# return tuple(self._cached_label_indices.keys()) -# -# @property -# def element_supports_and_matrices(self): -# if self._cached_elements is None: -# self._cached_elements = tuple( -# ((support, _ot.lindblad_error_generator(elemgen_label, self.basis_1q, normalize=True, sparse=False)) -# for support, elemgen_label in self.labels)) -# return self._cached_elements -# -# def element_index(self, label): -# """ -# TODO: docstring -# """ -# if self._cached_label_indices is None: -# self.labels # triggers building of labels -# return self._cached_label_indices[label] -# -# @property -# def sslbls(self): -# """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ -# return self.sslbls -# -# def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): -# """ -# Create a sub-basis of this basis by including only the elements -# that overlap the given support (state space labels) -# """ -# #Note: can we reduce self.state_space? -# return CompleteErrorgenBasis(self._basis_1q, self.state_space, self._other_mode, -# self._max_ham_weight if retain_max_weights else None, -# self._max_other_weight if retain_max_weights else None, -# self._must_overlap_with_these_sslbls) diff --git a/pygsti/baseobjs/errorgenspace.py b/pygsti/baseobjs/errorgenspace.py index d1df666a4..ad71ad4c7 100644 --- a/pygsti/baseobjs/errorgenspace.py +++ b/pygsti/baseobjs/errorgenspace.py @@ -97,13 +97,3 @@ def normalize(self, norm_order=2): for j in range(self.vectors.shape[1]): sign = +1 if max(self.vectors[:, j]) >= -min(self.vectors[:, j]) else -1 self.vectors[:, j] /= sign * _np.linalg.norm(self.vectors[:, j], ord=norm_order) - - -#class LowWeightErrorgenSpace(ErrorgenSpace): -# """ -# Like a SimpleErrorgenSpace but spanned by only the elementary error generators corresponding to -# low-weight (up to some maximum weight) basis elements -# (so far, only Pauli-product bases work for this, since `Basis` objects don't keep track of each -# element's weight (?)). 
-# """ -# pass diff --git a/pygsti/baseobjs/label.py b/pygsti/baseobjs/label.py index 55cb15c1a..a9033239b 100644 --- a/pygsti/baseobjs/label.py +++ b/pygsti/baseobjs/label.py @@ -360,7 +360,6 @@ def map_state_space_labels(self, mapper): mapped_sslbls = [mapper(sslbl) for sslbl in self.sslbls] return Label(self.name, mapped_sslbls) - def __str__(self): """ Defines how a Label is printed out, e.g. Gx:0 or Gcnot:1:2 diff --git a/pygsti/baseobjs/polynomial.py b/pygsti/baseobjs/polynomial.py index eed6e88c0..528e7ff79 100644 --- a/pygsti/baseobjs/polynomial.py +++ b/pygsti/baseobjs/polynomial.py @@ -35,7 +35,7 @@ def _vinds_to_int(vinds, vindices_per_int, max_num_vars): return tuple(ret_tup) -class FASTPolynomial(object): +class Polynomial(object): """ A polynomial that behaves like a Python dict of coefficients. @@ -136,7 +136,7 @@ def product(cls, list_of_polys): rep = list_of_polys[0]._rep for p in list_of_polys[1:]: rep = rep.mult(p._rep) - return FASTPolynomial.from_rep(rep) + return Polynomial.from_rep(rep) def __init__(self, coeffs=None, max_num_vars=100): """ @@ -161,7 +161,7 @@ def __init__(self, coeffs=None, max_num_vars=100): have (x_0 to x_(`max_num_vars-1`)). This sets the maximum allowed variable index within this polynomial. """ - vindices_per_int = FASTPolynomial._vindices_per_int(max_num_vars) + vindices_per_int = Polynomial._vindices_per_int(max_num_vars) int_coeffs = {_vinds_to_int(k, vindices_per_int, max_num_vars): v for k, v in coeffs.items()} self._rep = _PolynomialRep(int_coeffs, max_num_vars, vindices_per_int) @@ -245,7 +245,7 @@ def deriv(self, wrt_param): del l[l.index(wrt_param)] dcoeffs[tuple(l)] = cnt * coeff - return FASTPolynomial(dcoeffs, self.max_num_vars) + return Polynomial(dcoeffs, self.max_num_vars) @property def degree(self): @@ -314,7 +314,7 @@ def copy(self): ------- Polynomial """ - return FASTPolynomial.from_rep(self._rep.copy()) + return Polynomial.from_rep(self._rep.copy()) def map_indices(self, mapfn): """ @@ -335,7 +335,7 @@ def map_indices(self, mapfn): ------- Polynomial """ - return FASTPolynomial({mapfn(k): v for k, v in self.coeffs.items()}, self.max_num_vars) + return Polynomial({mapfn(k): v for k, v in self.coeffs.items()}, self.max_num_vars) def map_indices_inplace(self, mapfn): """ @@ -421,7 +421,7 @@ def mult(self, x): Polynomial The polynomial representing self * x. """ - return FASTPolynomial.from_rep(self._rep.mult(x._rep)) + return Polynomial.from_rep(self._rep.mult(x._rep)) def scale(self, x): """ @@ -488,7 +488,7 @@ def __repr__(self): def __add__(self, x): newpoly = self.copy() - if isinstance(x, FASTPolynomial): + if isinstance(x, Polynomial): newpoly._rep.add_inplace(x._rep) else: # assume a scalar that can be added to values newpoly._rep.add_scalar_to_all_coeffs_inplace(x) @@ -496,14 +496,14 @@ def __add__(self, x): def __iadd__(self, x): """ Does self += x more efficiently """ - if isinstance(x, FASTPolynomial): + if isinstance(x, Polynomial): self._rep.add_inplace(x._rep) else: # assume a scalar that can be added to values self._rep.add_scalar_to_all_coeffs_inplace(x) return self def __mul__(self, x): - if isinstance(x, FASTPolynomial): + if isinstance(x, Polynomial): return self.mult(x) else: # assume a scalar that can multiply values return self.scalar_mult(x) @@ -511,23 +511,8 @@ def __mul__(self, x): def __rmul__(self, x): return self.__mul__(x) - #Punt for now?? 
- #def __imul__(self, x): - # if isinstance(x, Polynomial): - # newcoeffs = {} - # for k1, v1 in self.items(): - # for k2, v2 in x.items(): - # k = tuple(sorted(k1 + k2)) - # if k in newcoeffs: newcoeffs[k] += v1 * v2 - # else: newcoeffs[k] = v1 * v2 - # self.clear() - # self.update(newcoeffs) - # else: - # self.scale(x) - # return self - def __pow__(self, n): - ret = FASTPolynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below + ret = Polynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below cur = self for i in range(int(_np.floor(_np.log2(n))) + 1): rem = n % 2 # gets least significant bit (i-th) of n @@ -555,575 +540,8 @@ def to_rep(self): # , max_num_vars=None not needed anymore -- given at __init__ return self._rep -Polynomial = FASTPolynomial - - -# class SLOWPolynomial(dict): # REMOVE THIS CLASS (just for reference) -# """ -# Encapsulates a polynomial as a subclass of the standard Python dict. -# -# Variables are represented by integer indices, e.g. "2" means "x_2". -# Keys are tuples of variable indices and values are numerical -# coefficients (floating point or complex numbers). To specify a variable -# to some power, its index is repeated in the key-tuple. -# -# E.g. x_0^2 + 3*x_1 + 4 is stored as {(0,0): 1.0, (1,): 3.0, (): 4.0} -# -# Parameters -# ---------- -# coeffs : dict -# A dictionary of coefficients. Keys are tuples of integers that -# specify the polynomial term the coefficient value multiplies -# (see above). If None, the zero polynomial (no terms) is created. -# -# max_num_vars : int -# The maximum number of independent variables this polynomial can -# hold. Placing a limit on the number of variables allows more -# compact storage and efficient evaluation of the polynomial. -# """ -# -# @classmethod -# def _vindices_per_int(cls, max_num_vars): -# """ -# The number of variable indices that fit into a single int when there are at most `max_num_vars` variables. -# -# This quantity is needed to directly construct Polynomial representations -# and is thus useful internally for forward simulators. -# -# Parameters -# ---------- -# max_num_vars : int -# The maximum number of independent variables. -# -# Returns -# ------- -# int -# """ -# return int(_np.floor(PLATFORM_BITS / _np.log2(max_num_vars + 1))) -# -# @classmethod -# def from_rep(cls, rep): -# """ -# Creates a Polynomial from a "representation" (essentially a lite-version) of a Polynomial. -# -# Note: usually we only need to convert from full-featured Python objects -# to the lighter-weight "representation" objects. Polynomials are an -# exception, since as the results of probability computations they need -# to be converted back from "representation-form" to "full-form". -# -# Parameters -# ---------- -# rep : PolynomialRep -# A polynomial representation. -# -# Returns -# ------- -# Polynomial -# """ -# max_num_vars = rep.max_num_vars # one of the few/only cases where a rep -# # max_order = rep.max_order # needs to expose some python properties -# -# def int_to_vinds(indx_tup): -# ret = [] -# for indx in indx_tup: -# while indx != 0: -# nxt = indx // (max_num_vars + 1) -# i = indx - nxt * (max_num_vars + 1) -# ret.append(i - 1) -# indx = nxt -# #assert(len(ret) <= max_order) #TODO: is this needed anymore? 
-# return tuple(sorted(ret)) -# -# tup_coeff_dict = {int_to_vinds(k): val for k, val in rep.coeffs.items()} -# ret = cls(tup_coeff_dict) -# ret.fastpoly = FASTPolynomial.from_rep(rep) -# ret._check_fast_polynomial() -# return ret -# -# def __init__(self, coeffs=None, max_num_vars=100): -# """ -# Initializes a new Polynomial object (a subclass of dict). -# -# Internally (as a dict) a Polynomial represents variables by integer -# indices, e.g. "2" means "x_2". Keys are tuples of variable indices and -# values are numerical coefficients (floating point or complex numbers). -# A variable to a power > 1 has its index repeated in the key-tuple. -# -# E.g. x_0^2 + 3*x_1 + 4 is stored as `{(0,0): 1.0, (1,): 3.0, (): 4.0}` -# -# Parameters -# ---------- -# coeffs : dict -# A dictionary of coefficients. Keys are tuples of integers that -# specify the polynomial term the coefficient value multiplies -# (see above). If None, the zero polynomial (no terms) is created. -# -# max_num_vars : int -# The maximum number of independent variables this polynomial can -# hold. Placing a limit on the number of variables allows more -# compact storage and efficient evaluation of the polynomial. -# """ -# super(Polynomial, self).__init__() -# if coeffs is not None: -# self.update(coeffs) -# self.max_num_vars = max_num_vars -# self.fastpoly = FASTPolynomial(coeffs, max_num_vars) -# self._check_fast_polynomial() -# -# def _check_fast_polynomial(self, raise_err=True): -# """ -# Check that included FASTPolynomial has remained in-sync with this one. -# -# This is purely for debugging, to ensure that the FASTPolynomial -# class implements its operations correctly. -# -# Parameters -# ---------- -# raise_err : bool, optional -# Whether to raise an AssertionError if the check fails. -# -# Returns -# ------- -# bool -# Whether or not the check has succeeded (True if the -# fast and slow implementations are in sync). -# """ -# if set(self.fastpoly.coeffs.keys()) != set(self.keys()): -# print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) -# if raise_err: assert(False), "STOP" -# return False -# for k in self.fastpoly.coeffs.keys(): -# if not _np.isclose(self.fastpoly.coeffs[k], self[k]): -# print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) -# if raise_err: assert(False), "STOP" -# return False -# if self.max_num_vars != self.fastpoly.max_num_vars: -# print("#Var mismatch: FAST", self.fastpoly.max_num_vars, " != SLOW", self.max_num_vars) -# if raise_err: assert(False), "STOP" -# return False -# -# return True -# -# def deriv(self, wrt_param): -# """ -# Take the derivative of this Polynomial with respect to a single variable. -# -# The result is another Polynomial. -# -# E.g. deriv(x_2^3 + 3*x_1, wrt_param=2) = 3x^2 -# -# Parameters -# ---------- -# wrt_param : int -# The variable index to differentiate with respect to. -# E.g. "4" means "differentiate w.r.t. x_4". -# -# Returns -# ------- -# Polynomial -# """ -# dcoeffs = {} -# for ivar, coeff in self.items(): -# cnt = float(ivar.count(wrt_param)) -# if cnt > 0: -# l = list(ivar) -# del l[l.index(wrt_param)] -# dcoeffs[tuple(l)] = cnt * coeff -# -# ret = Polynomial(dcoeffs, self.max_num_vars) -# ret.fastpoly = self.fastpoly.deriv(wrt_param) -# ret._check_fast_polynomial() -# return ret -# -# def degree(self): -# """ -# The largest sum-of-exponents for any term (monomial) within this polynomial. -# -# E.g. for x_2^3 + x_1^2*x_0^2 has degree 4. 
-# -# Returns -# ------- -# int -# """ -# ret = max((len(k) for k in self), default=0) -# assert(self.fastpoly.degree == ret) -# self._check_fast_polynomial() -# return ret -# -# def evaluate(self, variable_values): -# """ -# Evaluate this polynomial for a given set of variable values. -# -# Parameters -# ---------- -# variable_values : array-like -# An object that can be indexed so that `variable_values[i]` gives the -# numerical value for i-th variable (x_i). -# -# Returns -# ------- -# float or complex -# Depending on the types of the coefficients and `variable_values`. -# """ -# #FUTURE: make this function smarter (Russian peasant) -# ret = 0 -# for ivar, coeff in self.items(): -# ret += coeff * _np.prod([variable_values[i] for i in ivar]) -# assert(_np.isclose(ret, self.fastpoly.evaluate(variable_values))) -# self._check_fast_polynomial() -# return ret -# -# def compact(self, complex_coeff_tape=True): -# """ -# Generate a compact form of this polynomial designed for fast evaluation. -# -# The resulting "tapes" can be evaluated using -# :func:`opcalc.bulk_eval_compact_polynomials`. -# -# Parameters -# ---------- -# complex_coeff_tape : bool, optional -# Whether the `ctape` returned array is forced to be of complex type. -# If False, the real part of all coefficients is taken (even if they're -# complex). -# -# Returns -# ------- -# vtape, ctape : numpy.ndarray -# These two 1D arrays specify an efficient means for evaluating this -# polynomial. -# """ -# #if force_complex: -# # iscomplex = True -# #else: -# # iscomplex = any([abs(_np.imag(x)) > 1e-12 for x in self.values()]) -# iscomplex = complex_coeff_tape -# -# nTerms = len(self) -# nVarIndices = sum(map(len, self.keys())) -# vtape = _np.empty(1 + nTerms + nVarIndices, _np.int64) # "variable" tape -# ctape = _np.empty(nTerms, complex if iscomplex else 'd') # "coefficient tape" -# -# i = 0 -# vtape[i] = nTerms; i += 1 -# for iTerm, k in enumerate(sorted(self.keys())): -# l = len(k) -# ctape[iTerm] = self[k] if iscomplex else _np.real(self[k]) -# vtape[i] = l; i += 1 -# vtape[i:i + l] = k; i += l -# assert(i == len(vtape)), "Logic Error!" -# fast_vtape, fast_ctape = self.fastpoly.compact(iscomplex) -# assert(_np.allclose(fast_vtape, vtape) and _np.allclose(fast_ctape, ctape)) -# self._check_fast_polynomial() -# return vtape, ctape -# -# def copy(self): -# """ -# Returns a copy of this polynomial. -# -# Returns -# ------- -# Polynomial -# """ -# fast_cpy = self.fastpoly.copy() -# ret = Polynomial(self, self.max_num_vars) -# ret.fastpoly = fast_cpy -# ret._check_fast_polynomial() -# return ret -# -# def map_indices(self, mapfn): -# """ -# Performs a bulk find & replace on this polynomial's variable indices. -# -# This is useful when the variable indices have external significance -# (like being the indices of a gate's parameters) and one want to convert -# to another set of indices (like a parent model's parameters). -# -# Parameters -# ---------- -# mapfn : function -# A function that takes as input an "old" variable-index-tuple -# (a key of this Polynomial) and returns the updated "new" -# variable-index-tuple. -# -# Returns -# ------- -# Polynomial -# """ -# ret = Polynomial({mapfn(k): v for k, v in self.items()}, self.max_num_vars) -# ret.fastpoly = self.fastpoly.map_indices(mapfn) -# self._check_fast_polynomial() -# ret._check_fast_polynomial() -# return ret -# -# def map_indices_inplace(self, mapfn): -# """ -# Performs an in-place find & replace on this polynomial's variable indices. 
-# -# This is useful when the variable indices have external significance -# (like being the indices of a gate's parameters) and one want to convert -# to another set of indices (like a parent model's parameters). -# -# Parameters -# ---------- -# mapfn : function -# A function that takes as input an "old" variable-index-tuple -# (a key of this Polynomial) and returns the updated "new" -# variable-index-tuple. -# -# Returns -# ------- -# None -# """ -# self._check_fast_polynomial() -# new_items = {mapfn(k): v for k, v in self.items()} -# self.clear() -# self.update(new_items) -# self.fastpoly.map_indices_inplace(mapfn) -# self._check_fast_polynomial() -# -# def mult(self, x): -# """ -# Multiplies this polynomial by another polynomial `x`. -# -# Parameters -# ---------- -# x : Polynomial -# The polynomial to multiply by. -# -# Returns -# ------- -# Polynomial -# The polynomial representing self * x. -# """ -# newpoly = Polynomial({}, self.max_num_vars) -# for k1, v1 in self.items(): -# for k2, v2 in x.items(): -# k = tuple(sorted(k1 + k2)) -# if k in newpoly: newpoly[k] += v1 * v2 -# else: newpoly[k] = v1 * v2 -# -# newpoly.fastpoly = self.fastpoly.mult(x.fastpoly) -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def scale(self, x): -# """ -# Scale this polynomial by `x` (multiply all coefficients by `x`). -# -# Parameters -# ---------- -# x : float or complex -# The value to scale by. -# -# Returns -# ------- -# None -# """ -# # assume a scalar that can multiply values -# for k in tuple(self.keys()): # I think the tuple() might speed things up (why?) -# self[k] *= x -# self.fastpoly.scale(x) -# self._check_fast_polynomial() -# -# def scalar_mult(self, x): -# """ -# Multiplies this polynomial by a scalar `x`. -# -# Parameters -# ---------- -# x : float or complex -# The value to multiply by. 
-# -# Returns -# ------- -# Polynomial -# """ -# newpoly = self.copy() -# newpoly.scale(x) -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def __str__(self): -# def fmt(x): -# if abs(_np.imag(x)) > 1e-6: -# if abs(_np.real(x)) > 1e-6: return "(%.3f+%.3fj)" % (x.real, x.imag) -# else: return "(%.3fj)" % x.imag -# else: return "%.3f" % x.real -# -# termstrs = [] -# sorted_keys = sorted(list(self.keys())) -# for k in sorted_keys: -# varstr = ""; last_i = None; n = 1 -# for i in sorted(k): -# if i == last_i: n += 1 -# elif last_i is not None: -# varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") -# n = 1 -# last_i = i -# if last_i is not None: -# varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") -# #print("DB: k = ",k, " varstr = ",varstr) -# if abs(self[k]) > 1e-4: -# termstrs.append("%s%s" % (fmt(self[k]), varstr)) -# -# self._check_fast_polynomial() -# if len(termstrs) > 0: -# return " + ".join(termstrs) -# else: return "0" -# -# def __repr__(self): -# return "Poly[ " + str(self) + " ]" -# -# def __add__(self, x): -# newpoly = self.copy() -# if isinstance(x, Polynomial): -# for k, v in x.items(): -# if k in newpoly: newpoly[k] += v -# else: newpoly[k] = v -# newpoly.fastpoly = self.fastpoly + x.fastpoly -# else: # assume a scalar that can be added to values -# for k in newpoly: -# newpoly[k] += x -# newpoly.fastpoly = self.fastpoly + x -# self._check_fast_polynomial() -# newpoly._check_fast_polynomial() -# return newpoly -# -# def __iadd__(self, x): -# """ Does self += x more efficiently """ -# if isinstance(x, Polynomial): -# for k, v in x.items(): -# try: -# self[k] += v -# except KeyError: -# self[k] = v -# self.fastpoly += x.fastpoly -# else: # assume a scalar that can be added to values -# for k in self: -# self[k] += x -# self.fastpoly += x -# self._check_fast_polynomial() -# return self -# -# def __mul__(self, x): -# #if isinstance(x, Polynomial): -# # newpoly = Polynomial() -# # for k1,v1 in self.items(): -# # for k2,v2 in x.items(): -# # k = tuple(sorted(k1+k2)) -# # if k in newpoly: newpoly[k] += v1*v2 -# # else: newpoly[k] = v1*v2 -# #else: -# # # assume a scalar that can multiply values -# # newpoly = self.copy() -# # for k in newpoly: -# # newpoly[k] *= x -# #return newpoly -# if isinstance(x, Polynomial): -# ret = self.mult(x) -# else: # assume a scalar that can multiply values -# ret = self.scalar_mult(x) -# self._check_fast_polynomial() -# ret._check_fast_polynomial() -# return ret -# -# def __rmul__(self, x): -# return self.__mul__(x) -# -# def __imul__(self, x): -# self._check_fast_polynomial() -# if isinstance(x, Polynomial): -# x._check_fast_polynomial() -# newcoeffs = {} -# for k1, v1 in self.items(): -# for k2, v2 in x.items(): -# k = tuple(sorted(k1 + k2)) -# if k in newcoeffs: newcoeffs[k] += v1 * v2 -# else: newcoeffs[k] = v1 * v2 -# self.clear() -# self.update(newcoeffs) -# self.fastpoly *= x.fastpoly -# self._check_fast_polynomial() -# else: -# self.scale(x) -# self._check_fast_polynomial() -# return self -# -# def __pow__(self, n): -# ret = Polynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below -# cur = self -# for i in range(int(_np.floor(_np.log2(n))) + 1): -# rem = n % 2 # gets least significant bit (i-th) of n -# if rem == 1: ret *= cur # add current power of x (2^i) if needed -# cur = cur * cur # current power *= 2 -# n //= 2 # shift bits of n right -# ret.fastpoly = self.fastpoly ** n -# ret._check_fast_polynomial() -# self._check_fast_polynomial() -# return ret -# -# def 
__copy__(self): -# ret = self.copy() -# ret._check_fast_polynomial() -# self._check_fast_polynomial() -# return ret -# -# def to_rep(self): -# """ -# Construct a representation of this polynomial. -# -# "Representations" are lightweight versions of objects used to improve -# the efficiency of intensely computational tasks. Note that Polynomial -# representations must have the same `max_order` and `max_num_vars` in -# order to interact with each other (add, multiply, etc.). -# -# Parameters -# ---------- -# max_num_vars : int, optional -# The maximum number of variables the represenatation is allowed to -# have (x_0 to x_(`max_num_vars-1`)). This sets the maximum allowed -# variable index within the representation. -# -# Returns -# ------- -# PolynomialRep -# """ -# # Set max_num_vars (determines based on coeffs if necessary) -# max_num_vars = self.max_num_vars -# default_max_vars = 0 if len(self) == 0 else \ -# max([(max(k) + 1 if k else 0) for k in self.keys()]) -# if max_num_vars is None: -# max_num_vars = default_max_vars -# else: -# assert(default_max_vars <= max_num_vars) -# -# vindices_per_int = Polynomial._vindices_per_int(max_num_vars) -# -# def vinds_to_int(vinds): -# """ Convert tuple index of ints to single int given max_numvars """ -# ints_in_key = int(_np.ceil(len(vinds) / vindices_per_int)) -# ret_tup = [] -# for k in range(ints_in_key): -# ret = 0; m = 1 -# for i in vinds[k * vindices_per_int:(k + 1) * vindices_per_int]: # last tuple index=most significant -# assert(i < max_num_vars), "Variable index exceed maximum!" -# ret += (i + 1) * m -# m *= max_num_vars + 1 -# assert(ret >= 0), "vinds = %s -> %d!!" % (str(vinds), ret) -# ret_tup.append(ret) -# return tuple(ret_tup) -# -# int_coeffs = {vinds_to_int(k): v for k, v in self.items()} -# -# # (max_num_vars+1) ** vindices_per_int <= 2**PLATFORM_BITS, so: -# # vindices_per_int * log2(max_num_vars+1) <= PLATFORM_BITS -# vindices_per_int = int(_np.floor(PLATFORM_BITS / _np.log2(max_num_vars + 1))) -# self._check_fast_polynomial() -# -# return _PolynomialRep(int_coeffs, max_num_vars, vindices_per_int) +FASTPolynomial = Polynomial +# ^ That alias is deprecated and should be removed. def bulk_load_compact_polynomials(vtape, ctape, keep_compact=False, max_num_vars=100): diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 8ecf63183..7624d1f98 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -2667,7 +2667,6 @@ def replace_layers_with_aliases(self, alias_dict): layers = layers[:i] + c._labels + layers[i + 1:] return Circuit._fastinit(layers, self._line_labels, editable=False, occurrence=self._occurrence_id) - def change_gate_library(self, compilation, allowed_filter=None, allow_unchanged_gates=False, depth_compression=True, one_q_gate_relations=None): """ @@ -3556,7 +3555,6 @@ def cnt(obj): # obj is either a simple label or a list return sum([cnt(layer_lbl) for layer_lbl in self._labels]) - def _togrid(self, identity_name): """ return a list-of-lists rep? """ d = self.num_layers diff --git a/pygsti/circuits/circuitconstruction.py b/pygsti/circuits/circuitconstruction.py index 35b8acd39..d6bc979c1 100644 --- a/pygsti/circuits/circuitconstruction.py +++ b/pygsti/circuits/circuitconstruction.py @@ -183,10 +183,6 @@ def repeat_with_max_length(x, max_length, assert_at_least_one_rep=False): """ return repeat(x, repeat_count_with_max_length(x, max_length, assert_at_least_one_rep), assert_at_least_one_rep) -#Useful for anything? 
-#def repeat_empty(x,max_length,assert_at_least_one_rep=False): -# return () - def repeat_and_truncate(x, n, assert_at_least_one_rep=False): """ diff --git a/pygsti/data/datacomparator.py b/pygsti/data/datacomparator.py index 5b8f38b99..f5e9e266e 100644 --- a/pygsti/data/datacomparator.py +++ b/pygsti/data/datacomparator.py @@ -49,11 +49,6 @@ def _loglikelihood(p_list, n_list): output += _xlogy(n_list[i], pVal) return output -# Only used by the rectify data function, which is commented out, -# so this is also commented out. -# def loglikelihoodRatioObj(alpha,n_list_list,dof): -# return _np.abs(dof - _loglikelihood_ratio(alpha*n_list_list)) - def _loglikelihood_ratio(n_list_list): """ diff --git a/pygsti/data/hypothesistest.py b/pygsti/data/hypothesistest.py index 87046853a..985b2e9ac 100644 --- a/pygsti/data/hypothesistest.py +++ b/pygsti/data/hypothesistest.py @@ -253,13 +253,6 @@ def _initialize_to_weighted_holms_test(self): self.passing_graph[hind, :] = _np.ones(len(self.hypotheses), float) / (len(self.hypotheses) - 1) self.passing_graph[hind, hind] = 0. - # def _check_permissible(self): - # """ - # Todo - # """ - # # Todo : test that the graph is acceptable. - # return True - def add_pvalues(self, pvalues): """ Insert the p-values for the hypotheses. diff --git a/pygsti/drivers/bootstrap.py b/pygsti/drivers/bootstrap.py index 8a28881a4..cc469a7e4 100644 --- a/pygsti/drivers/bootstrap.py +++ b/pygsti/drivers/bootstrap.py @@ -492,52 +492,3 @@ def _to_rms_model(gs_list, target_gs): output_gs = target_gs.copy() output_gs.from_vector(_np.mean(gsVecArray)) return output_gs - -#Unused? -#def gateset_jtracedist(mdl,target_model,mx_basis="gm"): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.jtracedist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis) -## print output -# return output -# -#def gateset_entanglement_fidelity(mdl,target_model): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.entanglement_fidelity(mdl.operations[gate],target_model.operations[gate]) -# return output -# -#def gateset_decomp_angle(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('pi rotations',0) -# return output -# -#def gateset_decomp_decay_diag(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of diagonal rotation terms',0) -# return output -# -#def gateset_decomp_decay_offdiag(mdl): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(mdl.operations.keys()): -# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of off diagonal rotation terms',0) -# return output -# -##def gateset_fidelity(mdl,target_model,mx_basis="gm"): -## output = _np.zeros(3,dtype=float) -## for i, gate in enumerate(target_model.operations.keys()): -## output[i] = _tools.fidelity(mdl.operations[gate],target_model.operations[gate]) -## return output -# -#def gateset_diamonddist(mdl,target_model,mx_basis="gm"): -# output = _np.zeros(3,dtype=float) -# for i, gate in enumerate(target_model.operations.keys()): -# output[i] = _tools.diamonddist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis) -# return output -# -#def spamrameter(mdl): -# firstRho = list(mdl.preps.keys())[0] -# firstE = 
list(mdl.effects.keys())[0] -# return _np.dot(mdl.preps[firstRho].T,mdl.effects[firstE])[0,0] diff --git a/pygsti/evotypes/chp/opreps.py b/pygsti/evotypes/chp/opreps.py index 4c992d224..94b6279ce 100644 --- a/pygsti/evotypes/chp/opreps.py +++ b/pygsti/evotypes/chp/opreps.py @@ -49,11 +49,6 @@ def adjoint_acton_random(self, state, rand_state): def _chp_ops(self, seed_or_state=None): return self.base_chp_ops - #def chp_str(self, seed_or_state=None): - # op_str = '\n'.join(self.chp_ops(seed_or_state=seed_or_state)) - # if len(op_str) > 0: op_str += '\n' - # return op_str - def to_dense(self, on_space): try: str_ops = str(self._chp_ops()) @@ -217,63 +212,7 @@ def __init__(self, stochastic_basis, basis, initial_rates, seed_or_state, state_ super(OpRepStochastic, self).__init__(basis, _np.array(rates, 'd'), reps, seed_or_state, state_space) - #OLD - #self.basis = basis - #assert (basis.name == 'pp'), "Only Pauli basis is allowed for 'chp' evotype" - # - #if isinstance(seed_or_state, _RandomState): - # self.rand_state = seed_or_state - #else: - # self.rand_state = _RandomState(seed_or_state) - # - ##TODO: need to fix this: `basis` above functions as basis to make superoperators out of, but here we have - ## a CHP stochastic op which is given a basis for the space - e.g. a dim=2 vector space for 1 qubit, so - ## we need to distinguish/specify the basis better for this... and what about rate_poly_dicts (see svterm) - #nqubits = state_space.num_qubits - #assert(self.basis.dim == 4**nqubits), "Must have an integral number of qubits" - # - #std_chp_ops = _itgs.standard_gatenames_chp_conversions() - # - ## For CHP, need to make a Composed + EmbeddedOp for the super operators - ## For lower overhead, make this directly using the rep instead of with objects - #self.stochastic_superop_reps = [] - #for label in self.basis.labels[1:]: - # combined_chp_ops = [] - # - # for i, pauli in enumerate(label): - # name = 'Gi' if pauli == "I" else 'G%spi' % pauli.lower() - # chp_op = std_chp_ops[name] - # chp_op_targeted = [op.replace('0', str(i)) for op in chp_op] - # combined_chp_ops.extend(chp_op_targeted) - # - # sub_rep = OpRep(combined_chp_ops, state_space) - # self.stochastic_superop_reps.append(sub_rep) - #self.rates = initial_rates - #super(OpRepStochastic, self).__init__([], state_space) # don't store any chp_ops in base - def update_rates(self, rates): unitary_rates = [1 - sum(rates)] + list(rates) self.rates[:] = rates self.update_unitary_rates(unitary_rates) - - #TODO REMOVE - covered by OpRepRandomUnitary - #def chp_ops(self, seed_or_state=None): - # # Optionally override RNG for this call - # if seed_or_state is not None: - # if isinstance(seed_or_state, _np.random.RandomState): - # rand_state = seed_or_state - # else: - # rand_state = _np.random.RandomState(seed_or_state) - # else: - # rand_state = self.rand_state - # - # rates = self.rates - # all_rates = [*rates, 1.0 - sum(rates)] # Include identity so that probabilities are 1 - # index = rand_state.choice(self.basis.size, p=all_rates) - # - # # If final entry, no operation selected - # if index == self.basis.size - 1: - # return [] - # - # rep = self.stochastic_superop_reps[index] - # return rep._chp_ops() diff --git a/pygsti/evotypes/chp/statereps.py b/pygsti/evotypes/chp/statereps.py index b17785266..7ed792363 100644 --- a/pygsti/evotypes/chp/statereps.py +++ b/pygsti/evotypes/chp/statereps.py @@ -40,16 +40,6 @@ def __init__(self, chp_ops, state_space): def num_qubits(self): return self.state_space.num_qubits - #REMOVE - #def chp_ops(self, 
seed_or_state=None): - # return self.base_chp_ops - - #REMOVE - #def chp_str(self, seed_or_state=None): - # op_str = '\n'.join(self.chp_ops(seed_or_state=seed_or_state)) - # if len(op_str) > 0: op_str += '\n' - # return op_str - def copy(self): return StateRep(self.chp_ops, self.state_space) @@ -87,25 +77,3 @@ def reps_have_changed(self): def actionable_staterep(self): state_rep = self.state_rep.actionable_staterep() return self.op_rep.acton(state_rep) - -#REMOVE -# def chp_ops(self, seed_or_state=None): -# return self.state_rep.chp_ops(seed_or_state=seed_or_state) \ -# + self.op_rep.chp_ops(seed_or_state=seed_or_state) - -# TODO: Untested, only support computational and composed for now -#class StateRepTensorProduct(StateRep): -# def __init__(self, factor_state_reps, state_space): -# self.factor_reps = factor_state_reps -# super(StateRepTensorProduct, self).__init__([], state_space) -# self.reps_have_changed() -# -# def reps_have_changed(self): -# chp_ops = [] -# current_iqubit = 0 -# for factor in self.factor_reps: -# local_to_tp_index = {str(iloc): str(itp) for iloc, itp in -# enumerate(range(current_iqubit, current_iqubit + factor.num_qubits))} -# chp_ops.extend([_update_chp_op(op, local_to_tp_index) for op in self.chp_ops]) -# current_iqubit += factor.num_qubits -# self.chp_ops = chp_ops diff --git a/pygsti/evotypes/densitymx_slow/effectreps.py b/pygsti/evotypes/densitymx_slow/effectreps.py index 39a50a6be..b013978b8 100644 --- a/pygsti/evotypes/densitymx_slow/effectreps.py +++ b/pygsti/evotypes/densitymx_slow/effectreps.py @@ -105,11 +105,6 @@ def __init__(self, povm_factors, effect_labels, state_space): super(EffectRepTensorProduct, self).__init__(state_space) self.factor_effects_have_changed() - #TODO: fix this: - #def __reduce__(self): - # return (EffectRepTensorProduct, - # (self.kron_array, self.factor_dims, self.nfactors, self.max_factor_dim, self.dim)) - def to_dense(self, on_space, outvec=None): if on_space not in ('minimal', 'HilbertSchmidt'): @@ -164,22 +159,6 @@ def _fill_fast_kron(self): def factor_effects_have_changed(self): self._fill_fast_kron() # updates effect reps - #def to_dense(self): - # if len(self.factors) == 0: return _np.empty(0, complex if self._evotype == "statevec" else 'd') - # #NOTE: moved a fast version of to_dense to replib - could use that if need a fast to_dense call... - # - # factorPOVMs = self.factors - # ret = factorPOVMs[0][self.effectLbls[0]].to_dense() - # for i in range(1, len(factorPOVMs)): - # ret = _np.kron(ret, factorPOVMs[i][self.effectLbls[i]].to_dense()) - # return ret - # elif self._evotype == "stabilizer": - # # each factor is a StabilizerEffectVec - # raise ValueError("Cannot convert Stabilizer tensor product effect to an array!") - # # should be using effect.outcomes property... - # else: # self._evotype in ("svterm","cterm") - # raise NotImplementedError("to_dense() not implemented for %s evolution type" % self._evotype) - class EffectRepComposed(EffectRep): def __init__(self, op_rep, effect_rep, op_id, state_space): diff --git a/pygsti/evotypes/densitymx_slow/opreps.py b/pygsti/evotypes/densitymx_slow/opreps.py index 74a52c34d..ab63e3ce0 100644 --- a/pygsti/evotypes/densitymx_slow/opreps.py +++ b/pygsti/evotypes/densitymx_slow/opreps.py @@ -270,10 +270,6 @@ def update_rates(self, rates): self.rates[:] = rates self.update_unitary_rates(unitary_rates) -#class OpRepClifford(OpRep): # TODO? 
-# #def __init__(self, unitarymx, symplecticrep): -# # pass - class OpRepComposed(OpRep): @@ -375,13 +371,6 @@ def __init__(self, state_space, target_labels, embedded_rep): self.offset = sum(blocksizes[0:self.active_block_index]) super(OpRepEmbedded, self).__init__(state_space) - #def __reduce__(self): - # return (DMOpRepEmbedded, (self.embedded, - # self.num_basis_els, self.action_inds, - # self.blocksizes, self.embeddedDim, - # self.ncomponents, self.active_block_index, - # self.nblocks, self.dim)) - def _acton_other_blocks_trivially(self, output_state, state): offset = 0 for iBlk, blockSize in enumerate(self.blocksizes): @@ -466,15 +455,6 @@ def set_exp_params(self, mu, eta, m_star, s): def exp_params(self): return (self.mu, self.eta, self.m_star, self.s) - #def __reduce__(self): - # if self.unitary_postfactor is None: - # return (DMOpRepLindblad, (self.errorgen_rep, self.mu, self.eta, self.m_star, self.s, - # _np.empty(0, 'd'), _np.empty(0, _np.int64), _np.zeros(1, _np.int64))) - # else: - # return (DMOpRepLindblad, (self.errorgen_rep, self.mu, self.eta, self.m_star, self.s, - # self.unitary_postfactor.data, self.unitary_postfactor.indices, - # self.unitary_postfactor.indptr)) - def acton(self, state): """ Act this gate map on an input state """ statedata = state.data.copy() # must COPY because _custom... call below *modifies* "b" arg diff --git a/pygsti/evotypes/stabilizer/effectreps.pyx b/pygsti/evotypes/stabilizer/effectreps.pyx index 5efc6d8c8..9bc68e028 100644 --- a/pygsti/evotypes/stabilizer/effectreps.pyx +++ b/pygsti/evotypes/stabilizer/effectreps.pyx @@ -39,10 +39,6 @@ cdef class EffectRep(_basereps_cython.EffectRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.c_effect._n) # assume "unitary evolution"-type mode - def probability(self, StateRep state not None): #unnecessary (just put in signature): cdef StateRep st = state return self.c_effect.probability(state.c_state) diff --git a/pygsti/evotypes/stabilizer/opreps.pyx b/pygsti/evotypes/stabilizer/opreps.pyx index c8be47755..3ed5f0ee8 100644 --- a/pygsti/evotypes/stabilizer/opreps.pyx +++ b/pygsti/evotypes/stabilizer/opreps.pyx @@ -40,10 +40,6 @@ cdef class OpRep(_basereps_cython.OpRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.nqubits) # assume "unitary evolution"-type mode - def acton(self, StateRep state not None): cdef INT n = self.c_rep._n cdef INT namps = state.c_state._namps diff --git a/pygsti/evotypes/stabilizer/statereps.pyx b/pygsti/evotypes/stabilizer/statereps.pyx index 13cb6b0c9..782f10558 100644 --- a/pygsti/evotypes/stabilizer/statereps.pyx +++ b/pygsti/evotypes/stabilizer/statereps.pyx @@ -49,10 +49,6 @@ cdef class StateRep(_basereps_cython.StateRep): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.c_state._n) # assume "unitary evolution"-type mode - def actionable_staterep(self): # return a state rep that can be acted on by op reps or mapped to # a probability/amplitude by POVM effect reps. 
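For readers skimming these evotype hunks: the stochastic op reps whose update_rates methods are kept above (in pygsti/evotypes/chp/opreps.py and pygsti/evotypes/densitymx_slow/opreps.py) fold a list of per-basis-element error rates into a probability vector whose leading entry is the identity weight 1 - sum(rates); the removed chp_ops comment sampled an error from exactly that vector. Below is a minimal, self-contained sketch of that sampling idea -- illustrative only, not pyGSTi's API; the function and label names are made up.

import numpy as np

# Illustrative sketch only -- not pyGSTi API.  Mirrors the rate handling in the
# update_rates methods kept above: the "no error" (identity) weight is
# 1 - sum(rates) and the remaining entries are the individual error rates.
def sample_stochastic_error(rates, labels, seed=None):
    """Return one error label, or None when the identity is drawn."""
    rng = np.random.default_rng(seed)
    probs = np.concatenate(([1.0 - sum(rates)], rates))
    assert np.all(probs >= 0.0) and np.isclose(probs.sum(), 1.0), \
        "error rates must form a sub-probability vector"
    index = rng.choice(len(probs), p=probs)
    return None if index == 0 else labels[index - 1]

# Hypothetical 1-qubit stochastic Pauli channel with X/Y/Z error rates:
print(sample_stochastic_error([0.01, 0.01, 0.02], ['X', 'Y', 'Z'], seed=0))
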
diff --git a/pygsti/evotypes/stabilizer/termreps.pyx b/pygsti/evotypes/stabilizer/termreps.pyx index dc83b3002..728673a32 100644 --- a/pygsti/evotypes/stabilizer/termreps.pyx +++ b/pygsti/evotypes/stabilizer/termreps.pyx @@ -107,10 +107,3 @@ cdef class TermRep(_basereps_cython.TermRep): return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, self.pre_state, self.post_state, self.pre_effect, self.post_effect, self.pre_ops, self.post_ops) - - #Not needed - and this implementation is quite right as it will need to change - # the ordering of the pre/post ops also. - #def conjugate(self): - # return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, - # self.post_state, self.pre_state, self.post_effect, self.pre_effect, - # self.post_ops, self.pre_ops) diff --git a/pygsti/evotypes/stabilizer_slow/effectreps.py b/pygsti/evotypes/stabilizer_slow/effectreps.py index 5bf2dfe33..6e5d8bb21 100644 --- a/pygsti/evotypes/stabilizer_slow/effectreps.py +++ b/pygsti/evotypes/stabilizer_slow/effectreps.py @@ -23,10 +23,6 @@ def __init__(self, state_space): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**self.nqubits # assume "unitary evolution"-type mode - def probability(self, state): return state.sframe.measurement_probability(self.zvals, check=True) # use check for now? @@ -37,10 +33,6 @@ def to_dense(self, on_space): return _mt.zvals_to_dense(self.zvals, superket=bool(on_space not in ('minimal', 'Hilbert'))) -#class EffectRepConjugatedState(EffectRep): -# pass # TODO - this should be possible - - class EffectRepComputational(EffectRep): def __init__(self, zvals, basis, state_space): @@ -49,17 +41,6 @@ def __init__(self, zvals, basis, state_space): assert(self.state_space.num_qubits == len(self.zvals)) super(EffectRepComputational, self).__init__(state_space) - #@property - #def outcomes(self): - # """ - # The 0/1 outcomes identifying this effect within its StabilizerZPOVM - # - # Returns - # ------- - # numpy.ndarray - # """ - # return self.zvals - def __str__(self): nQubits = len(self.zvals) s = "Stabilizer effect vector for %d qubits with outcome %s" % (nQubits, str(self.zvals)) @@ -80,9 +61,6 @@ def __init__(self, op_rep, effect_rep, op_id, state_space): super(EffectRepComposed, self).__init__(state_space) - #def __reduce__(self): - # return (EffectRepComposed, (self.op_rep, self.effect_rep, self.op_id, self.state_space)) - def probability(self, state): state = self.op_rep.acton(state) # *not* acton_adjoint return self.effect_rep.probability(state) diff --git a/pygsti/evotypes/stabilizer_slow/opreps.py b/pygsti/evotypes/stabilizer_slow/opreps.py index 313e6aad8..ae3b2bea4 100644 --- a/pygsti/evotypes/stabilizer_slow/opreps.py +++ b/pygsti/evotypes/stabilizer_slow/opreps.py @@ -34,10 +34,6 @@ def adjoint_acton(self, state): def nqubits(self): return self.state_space.num_qubits - #@property - #def dim(self): - # return 2**(self.nqubits) # assume "unitary evolution"-type mode - class OpRepClifford(OpRep): def __init__(self, unitarymx, symplecticrep, basis, state_space): diff --git a/pygsti/evotypes/stabilizer_slow/statereps.py b/pygsti/evotypes/stabilizer_slow/statereps.py index b47417f08..bb71b74e0 100644 --- a/pygsti/evotypes/stabilizer_slow/statereps.py +++ b/pygsti/evotypes/stabilizer_slow/statereps.py @@ -42,10 +42,6 @@ def amps(self): def nqubits(self): return self.sframe.n - #@property - #def dim(self): - # return 2**self.nqubits # assume "unitary evolution"-type mode - def actionable_staterep(self): # return a state rep 
that can be acted on by op reps or mapped to # a probability/amplitude by POVM effect reps. diff --git a/pygsti/evotypes/statevec/termreps.pyx b/pygsti/evotypes/statevec/termreps.pyx index 07ba8f61a..dc73ae1d0 100644 --- a/pygsti/evotypes/statevec/termreps.pyx +++ b/pygsti/evotypes/statevec/termreps.pyx @@ -112,13 +112,6 @@ cdef class TermRep(_basereps_cython.TermRep): self.pre_state, self.post_state, self.pre_effect, self.post_effect, self.pre_ops, self.post_ops) - #Not needed - and this implementation is quite right as it will need to change - # the ordering of the pre/post ops also. - #def conjugate(self): - # return TermRep(self.coeff.copy(), self.magnitude, self.logmagnitude, - # self.post_state, self.pre_state, self.post_effect, self.pre_effect, - # self.post_ops, self.pre_ops) - #Note: to use direct term reps (numerical coeffs) we'll need to update # what the members are called and add methods as was done for TermRep. diff --git a/pygsti/evotypes/statevec_slow/effectreps.py b/pygsti/evotypes/statevec_slow/effectreps.py index d1a14ebc6..2863233e5 100644 --- a/pygsti/evotypes/statevec_slow/effectreps.py +++ b/pygsti/evotypes/statevec_slow/effectreps.py @@ -180,9 +180,6 @@ def __init__(self, op_rep, effect_rep, op_id, state_space): super(EffectRepComposed, self).__init__(effect_rep.state_space) - #def __reduce__(self): - # return (EffectRepComposed, (self.op_rep, self.effect_rep, self.op_id, self.state_space)) - def probability(self, state): state = self.op_rep.acton(state) # *not* acton_adjoint return self.effect_rep.probability(state) diff --git a/pygsti/evotypes/statevec_slow/opreps.py b/pygsti/evotypes/statevec_slow/opreps.py index b60fadd51..817ad1e55 100644 --- a/pygsti/evotypes/statevec_slow/opreps.py +++ b/pygsti/evotypes/statevec_slow/opreps.py @@ -107,11 +107,6 @@ def __init__(self, name, basis, state_space): super(OpRepStandard, self).__init__(U, basis, state_space) -#class OpRepStochastic(OpRepDense): -# - maybe we could add this, but it wouldn't be a "dense" op here, -# perhaps we need to change API? - - class OpRepComposed(OpRep): # exactly the same as densitymx case def __init__(self, factor_op_reps, state_space): diff --git a/pygsti/extras/__init__.py b/pygsti/extras/__init__.py index 83d98d445..cbdf0d169 100644 --- a/pygsti/extras/__init__.py +++ b/pygsti/extras/__init__.py @@ -9,5 +9,4 @@ """ Container for beyond-GST sub-packages """ # from . import drift -# from . import rb # temporarily removed until RB analysis is fixed # from . 
import rpe diff --git a/pygsti/extras/interpygate/core.py b/pygsti/extras/interpygate/core.py index d7b205146..5f25ab20e 100644 --- a/pygsti/extras/interpygate/core.py +++ b/pygsti/extras/interpygate/core.py @@ -241,15 +241,6 @@ def create_object(self, args=None, sslbls=None): return InterpolatedDenseOp(target_op, self.base_interpolator, self.aux_interpolator, self.to_vector(), _np.array(args), self._argument_indices) - #def write(self, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'wb') as f: - # _pickle.dump(self.target_op, f) - # _np.save(dirname / "paramvec.np", self._paramvec_with_time) - # self.base_interpolator.write(dirname / "base.interp") - # if self.aux_interpolator is not None: - # self.aux_interptolator.write(dirname / "aux.interp") - @property def num_params(self): return len(self._paramvec) @@ -267,24 +258,6 @@ def from_vector(self, v, close=False, dirty_value=True): class InterpolatedDenseOp(_DenseOperator): - #@classmethod - #def from_dir(cls, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'rb') as f: - # target_op = _pickle.load(f) - # pt = _np.load(dirname / "paramvec.np") - # base_interp = InterpolatedQuantity.from_file(dirname / "base.interp") - # aux_interp = InterpolatedQuantity.from_file(dirname / "aux.interp") \ - # if (dirname / "aux.interp").exists() else None - # - # if base_interp.times is not None: - # tm = pt[-1] - # pt = pt[0:-1] - # else: - # tm = None - # - # return cls(target_op, base_interp, aux_interp, pt, tm) - @classmethod def create_by_interpolating_physical_process(cls, target_op, physical_process, parameter_ranges=None, parameter_points=None, comm=None, @@ -391,15 +364,6 @@ def __init__(self, target_op, base_interpolator, aux_interpolator=None, initial_ # initialize object self.from_vector(self._paramvec) - #def write(self, dirname): - # dirname = _pathlib.Path(dirname) - # with open(str(dirname / "targetop.pkl"), 'wb') as f: - # _pickle.dump(self.target_op, f) - # _np.save(dirname / "paramvec.np", self._paramvec_with_time) - # self.base_interpolator.write(dirname / "base.interp") - # if self.aux_interpolator is not None: - # self.aux_interptolator.write(dirname / "aux.interp") - @property def num_params(self): return len(self._paramvec) diff --git a/pygsti/extras/rb/__init__.py b/pygsti/extras/rb/__init__.py deleted file mode 100644 index fd0109f2d..000000000 --- a/pygsti/extras/rb/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -""" Randomized Benchmarking Sub-package """ -#*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). -# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights -# in this software. -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. -#*************************************************************************************************** - -from . import dataset -from . 
import simulate -from .benchmarker import Benchmarker diff --git a/pygsti/extras/rb/benchmarker.py b/pygsti/extras/rb/benchmarker.py deleted file mode 100644 index 03018b341..000000000 --- a/pygsti/extras/rb/benchmarker.py +++ /dev/null @@ -1,957 +0,0 @@ -""" Encapsulates RB results and dataset objects """ -#*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). -# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights -# in this software. -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. -#*************************************************************************************************** - -import copy as _copy -import warnings as _warnings -from itertools import cycle as _cycle - -import numpy as _np - -from pygsti.data import dataset as _stdds, multidataset as _multids, datacomparator as _dcomp -from pygsti.models import oplessmodel as _oplessmodel - -#from . import analysis as _analysis -_analysis = None # MOVED - and this module is deprecated & broken now, so just set to None - - -class Benchmarker(object): - """ - todo - - """ - - def __init__(self, specs, ds=None, summary_data=None, predicted_summary_data=None, - dstype='standard', success_outcome='success', success_key='target', - dscomparator=None): - """ - todo - - dstype : ('success-fail', 'standard') - - specs: dictionary of (name, RBSpec) key-value pairs. The names are arbitrary - - """ - if ds is not None: - assert(dstype in ('success-fail', 'standard', 'dict')), "Unknown format for the dataset!" - self.dstype = dstype - if self.dstype == 'success-fail' or self.dstype == 'dict': - self.success_outcome = success_outcome - else: - self.success_outcome = None - if self.dstype == 'standard' or self.dstype == 'dict': - self.success_key = success_key - else: - self.success_key = None - - if dstype == 'dict': - assert('standard' in ds.keys() and 'success-fail' in ds.keys()) - self.multids = ds - else: - self.multids = {} - if isinstance(ds, _stdds.DataSet): - self.multids[self.dstype] = _multids.MultiDataSet() - self.multids[self.dstype].add_dataset(0, ds) - elif isinstance(ds, list): - self.multids[self.dstype] = _multids.MultiDataSet() - for i, subds in enumerate(ds): - self.multids[self.dstype].add_dataset(i, ds) - elif isinstance(ds, _multids.MultiDataSet): - self.multids[self.dstype] = ds - else: - raise ValueError("If specified, `ds` must be a DataSet, a list of DataSets," - + " a MultiDataSet or a dictionary of MultiDataSets!") - - self.numpasses = len(self.multids[list(self.multids.keys())[0]]) - else: - assert(summary_data is not None), "Must specify one or more DataSets or a summary data dict!" 
- self.multids = None - self.success_outcome = None - self.success_key = None - self - - self.dscomparator = _copy.deepcopy(dscomparator) - - self._specs = tuple(specs.values()) - self._speckeys = tuple(specs.keys()) - - if summary_data is None: - self.pass_summary_data = {} - self.global_summary_data = {} - self.aux = {} - else: - assert(isinstance(summary_data, dict)), "The summary data must be a dictionary" - self.pass_summary_data = summary_data['pass'].copy() - self.global_summary_data = summary_data['global'].copy() - self.aux = summary_data.get('aux', {}).copy() - if self.multids is None: - arbqubits = self._specs[0].get_structure()[0] - arbkey = list(self.pass_summary_data[0][arbqubits].keys())[0] - arbdepth = list(self.pass_summary_data[0][arbqubits][arbkey].keys())[0] - self.numpasses = len(self.pass_summary_data[0][arbqubits][arbkey][arbdepth]) - - if predicted_summary_data is None: - self.predicted_summary_data = {} - else: - self.predicted_summary_data = predicted_summary_data.copy() - - def select_volumetric_benchmark_regions(self, depths, boundary, widths='all', datatype='success_probabilities', - statistic='mean', merit='aboveboundary', specs=None, aggregate=True, - passnum=None, rescaler='auto'): - - # Selected regions encodes the selected regions, but in the slighty obtuse format of a dictionary of spec - # indices and a list of tuples of qubit regions. (so, e.g., if 1- and 2-qubit circuit are run in parallel - # the width-1 and width-2 spec chosen could by encoded as the index of that spec and a length-2 list of those - # regions.). A less obtuse way to represent the region selection should maybe be used in the future. - selected_regions = {} - assert(statistic in ('max', 'mean', 'min')) - - if specs is None: - specs = self._specs - - specsbywidth = {} - for ind, structure in specs.items(): - for qs in structure: - w = len(qs) - if widths == 'all' or w in widths: - if w not in specsbywidth.keys(): - specsbywidth[w] = [] - specsbywidth[w].append((ind, qs)) - - if not aggregate: - assert(passnum is not None), "Must specify the passnumber data to use for selection if not aggregating!" - - for w, specsforw in specsbywidth.items(): - - if len(specsforw) == 1: # There's no decision to make: only one benchmark of one region of the size w. - (ind, qs) = specsforw[0] - if ind not in selected_regions: - selected_regions[ind] = [qs, ] - else: - selected_regions[ind].append(qs) - - else: # There's data for more than one region (and/or multiple benchmarks of a single region) of size w - best_boundary_index = 0 - best_vb_at_best_boundary_index = None - for (ind, qs) in specsforw: - vbdata = self.volumetric_benchmark_data(depths, widths=[w, ], datatype=datatype, - statistic=statistic, specs={ind: [qs, ]}, - aggregate=aggregate, rescaler=rescaler)['data'] - # Only looking at 1 width, so drop the width key, and keep only the depths with data - if not aggregate: - vbdata = {d: vbdata[d][w][passnum] for d in vbdata.keys() if w in vbdata[d].keys()} - else: - vbdata = {d: vbdata[d][w] for d in vbdata.keys() if w in vbdata[d].keys()} - - # We calcluate the depth index of the largest depth at which the data is above/below the boundary, - # ignoring cases where there's data missing at some depths as long as we're still above/below the - # boundard at a larger depth. 
- if merit == 'aboveboundary': - x = [vbdata[d] > boundary if d in vbdata.keys() else None for d in depths] - if merit == 'belowboundary': - x = [vbdata[d] < boundary if d in vbdata.keys() else None for d in depths] - try: - x = x[:x.index(False)] - except: - pass - x.reverse() - try: - boundary_index = len(x) - 1 - x.index(True) - #print("There's a non-zero boundary!", str(w), qs) - except: - boundary_index = 0 - #print("Zero boundary!", str(w), qs) - - if boundary_index > best_boundary_index: - best_boundary_index = boundary_index - selected_region_at_w = (ind, qs) - best_vb_at_best_boundary_index = vbdata[depths[boundary_index]] - elif boundary_index == best_boundary_index: - if best_vb_at_best_boundary_index is None: - # On first run through we automatically select that region - selected_region_at_w = (ind, qs) - best_vb_at_best_boundary_index = vbdata[depths[boundary_index]] - else: - if merit == 'aboveboundary' \ - and vbdata[depths[boundary_index]] > best_vb_at_best_boundary_index: - selected_region_at_w = (ind, qs) - best_vb_at_best_boundary_index = vbdata[depths[boundary_index]] - if merit == 'belowboundary' \ - and vbdata[depths[boundary_index]] < best_vb_at_best_boundary_index: - selected_region_at_w = (ind, qs) - best_vb_at_best_boundary_index = vbdata[depths[boundary_index]] - else: - pass - - (ind, qs) = selected_region_at_w - if ind not in selected_regions: - selected_regions[ind] = [qs, ] - else: - selected_regions[ind].append(qs) - - return selected_regions - - def volumetric_benchmark_data(self, depths, widths='all', datatype='success_probabilities', - statistic='mean', specs=None, aggregate=True, rescaler='auto'): - - # maxmax : max over all depths/widths larger or equal - # minmin : min over all deoths/widths smaller or equal. - - assert(statistic in ('max', 'mean', 'min', 'dist', 'maxmax', 'minmin')) - - if isinstance(widths, str): - assert(widths == 'all') - else: - assert(isinstance(widths, list) or isinstance(widths, tuple)) - - if specs is None: # If we're not given a filter, we use all of the data. - specs = {i: [qs for qs in spec.get_structure()] for i, spec in enumerate(self._specs)} - - width_to_spec = {} - for i, structure in specs.items(): - for qs in structure: - w = len(qs) - if widths == 'all' or w in widths: - if w not in width_to_spec: - width_to_spec[w] = (i, qs) - else: - raise ValueError(("There are multiple qubit subsets of size {} benchmarked! 
" - "Cannot have specs as None!").format(w)) - - if widths == 'all': - widths = list(width_to_spec.keys()) - widths.sort() - else: - assert(set(widths) == set(list(width_to_spec.keys()))) - - if isinstance(rescaler, str): - if rescaler == 'auto': - if datatype == 'success_probabilities': - def rescale_function(data, width): - return list((_np.array(data) - 1 / 2**width) / (1 - 1 / 2**width)) - else: - def rescale_function(data, width): - return data - elif rescaler == 'none': - - def rescale_function(data, width): - return data - - else: - raise ValueError("Unknown rescaling option!") - - else: - rescale_function = rescaler - - # if samecircuitpredictions: - # predvb = {d: {} for d in depths} - # else: - # predvb = None - - qs = self._specs[0].get_structure()[0] # An arbitrary key - if datatype in self.pass_summary_data[0][qs].keys(): - datadict = self.pass_summary_data - globaldata = False - elif datatype in self.global_summary_data[0][qs].keys(): - datadict = self.global_summary_data - globaldata = True - else: - raise ValueError("Unknown datatype!") - - if aggregate or globaldata: - vb = {d: {} for d in depths} - fails = {d: {} for d in depths} - else: - vb = [{d: {} for d in depths} for i in range(self.numpasses)] - fails = [{d: {} for d in depths} for i in range(self.numpasses)] - - if len(self.predicted_summary_data) > 0: - arbkey = list(self.predicted_summary_data.keys())[0] - dopredictions = datatype in self.predicted_summary_data[arbkey][0][qs].keys() - if dopredictions: - pkeys = self.predicted_summary_data.keys() - predictedvb = {pkey: {d: {} for d in depths} for pkey in pkeys} - else: - predictedvb = {pkey: None for pkey in self.predicted_summary_data.keys()} - - for w in widths: - (i, qs) = width_to_spec[w] - data = datadict[i][qs][datatype] - if dopredictions: - preddata = {pkey: self.predicted_summary_data[pkey][i][qs][datatype] for pkey in pkeys} - for d in depths: - if d in data.keys(): - - dline = data[d] - - if globaldata: - - failcount = _np.sum(_np.isnan(dline)) - fails[d][w] = (len(dline) - failcount, failcount) - - if statistic == 'dist': - vb[d][w] = rescale_function(dline, w) - else: - if not _np.isnan(rescale_function(dline, w)).all(): - if statistic == 'max' or statistic == 'maxmax': - vb[d][w] = _np.nanmax(rescale_function(dline, w)) - elif statistic == 'mean': - vb[d][w] = _np.nanmean(rescale_function(dline, w)) - elif statistic == 'min' or statistic == 'minmin': - vb[d][w] = _np.nanmin(rescale_function(dline, w)) - else: - vb[d][w] = _np.nan - - else: - failline = [(len(dpass) - _np.sum(_np.isnan(dpass)), _np.sum(_np.isnan(dpass))) - for dpass in dline] - - if statistic == 'max' or statistic == 'maxmax': - vbdataline = [_np.nanmax(rescale_function(dpass, w)) - if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan - for dpass in dline] - elif statistic == 'mean': - vbdataline = [_np.nanmean(rescale_function(dpass, w)) - if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan - for dpass in dline] - elif statistic == 'min' or statistic == 'minmin': - vbdataline = [_np.nanmin(rescale_function(dpass, w)) - if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan - for dpass in dline] - elif statistic == 'dist': - vbdataline = [rescale_function(dpass, w) for dpass in dline] - - if not aggregate: - for i in range(len(vb)): - vb[i][d][w] = vbdataline[i] - fails[i][d][w] = failline[i] - - if aggregate: - - successcount = 0 - failcount = 0 - for (successcountpass, failcountpass) in failline: - successcount += successcountpass - failcount += 
failcountpass - fails[d][w] = (successcount, failcount) - - if statistic == 'dist': - vb[d][w] = [item for sublist in vbdataline for item in sublist] - else: - if not _np.isnan(vbdataline).all(): - if statistic == 'max' or statistic == 'maxmax': - vb[d][w] = _np.nanmax(vbdataline) - elif statistic == 'mean': - vb[d][w] = _np.nanmean(vbdataline) - elif statistic == 'min' or statistic == 'minmin': - vb[d][w] = _np.nanmin(vbdataline) - else: - vb[d][w] = _np.nan - - # Repeat the process for the predictions, but with simpler code as don't have to - # deal with passes or NaNs. - if dopredictions: - pdline = {pkey: preddata[pkey][d] for pkey in pkeys} - for pkey in pkeys: - if statistic == 'dist': - predictedvb[pkey][d][w] = rescale_function(pdline[pkey], w) - if statistic == 'max' or statistic == 'maxmax': - predictedvb[pkey][d][w] = _np.max(rescale_function(pdline[pkey], w)) - if statistic == 'mean': - predictedvb[pkey][d][w] = _np.mean(rescale_function(pdline[pkey], w)) - if statistic == 'min' or statistic == 'minmin': - predictedvb[pkey][d][w] = _np.min(rescale_function(pdline[pkey], w)) - - if statistic == 'minmin' or statistic == 'maxmax': - if aggregate: - for d in vb.keys(): - for w in vb[d].keys(): - for d2 in vb.keys(): - for w2 in vb[d2].keys(): - if statistic == 'minmin' and d2 <= d and w2 <= w and vb[d2][w2] < vb[d][w]: - vb[d][w] = vb[d2][w2] - if statistic == 'maxmax' and d2 >= d and w2 >= w and vb[d2][w2] > vb[d][w]: - vb[d][w] = vb[d2][w2] - else: - for i in range(self.numpasses): - for d in vb[i].keys(): - for w in vb[i][d].keys(): - for d2 in vb[i].keys(): - for w2 in vb[i][d2].keys(): - if statistic == 'minmin' and d2 <= d and w2 <= w and vb[i][d2][w2] < vb[i][d][w]: - vb[i][d][w] = vb[i][d2][w2] - if statistic == 'maxmax' and d2 >= d and w2 >= w and vb[i][d2][w2] > vb[i][d][w]: - vb[i][d][w] = vb[i][d2][w2] - - out = {'data': vb, 'fails': fails, 'predictions': predictedvb} - - return out - - def flattened_data(self, specs=None, aggregate=True): - - flattened_data = {} - - if specs is None: - specs = self.filter_experiments() - - qubits = self._specs[0].get_structure()[0] # An arbitrary key in the dict of the summary data. 
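For reference, the rescaling applied by the removed volumetric-benchmark code above maps raw success probabilities so that the random-guessing baseline 1/2**width goes to 0 while a perfect score stays at 1. A minimal standalone sketch of that formula (the function name here is illustrative, not part of pyGSTi):

```python
import numpy as np

def rescale_success_probabilities(data, width):
    # Subtract the 1 / 2**width uniform-guessing baseline and renormalize,
    # mirroring the rescale_function defined in the removed method.
    data = np.asarray(data, dtype=float)
    return (data - 1 / 2**width) / (1 - 1 / 2**width)

# For width=2 qubits: 0.25 is chance level -> 0.0, 1.0 stays at 1.0, 0.625 -> 0.5
print(rescale_success_probabilities([0.25, 1.0, 0.625], width=2))
```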
- if aggregate: - flattened_data = {dtype: [] for dtype in self.pass_summary_data[0][qubits].keys()} - else: - flattened_data = {dtype: [[] for i in range(self.numpasses)] - for dtype in self.pass_summary_data[0][qubits].keys()} - flattened_data.update({dtype: [] for dtype in self.global_summary_data[0][qubits].keys()}) - flattened_data.update({dtype: [] for dtype in self.aux[0][qubits].keys()}) - flattened_data.update({'predictions': {pkey: {'success_probabilities': []} - for pkey in self.predicted_summary_data.keys()}}) - - for specind, structure in specs.items(): - for qubits in structure: - for dtype, data in self.pass_summary_data[specind][qubits].items(): - for depth, dataline in data.items(): - #print(specind, qubits, dtype, depth) - if aggregate: - aggregatedata = _np.array(dataline[0]) - # print(aggregatedata) - # print(type(aggregatedata)) - # print(type(aggregatedata[0])) - for i in range(1, self.numpasses): - # print(dataline[i]) - # print(type(dataline[i])) - # print(type(dataline[i][0])) - aggregatedata = aggregatedata + _np.array(dataline[i]) - flattened_data[dtype] += list(aggregatedata) - else: - for i in range(self.numpasses): - flattened_data[dtype][i] += dataline[i] - - for dtype, data in self.global_summary_data[specind][qubits].items(): - for depth, dataline in data.items(): - flattened_data[dtype] += dataline - for dtype, data in self.aux[specind][qubits].items(): - for depth, dataline in data.items(): - flattened_data[dtype] += dataline - for pkey in self.predicted_summary_data.keys(): - data = self.predicted_summary_data[pkey][specind][qubits] - if 'success_probabilities' in data.keys(): - for depth, dataline in data['success_probabilities'].items(): - flattened_data['predictions'][pkey]['success_probabilities'] += dataline - else: - for (depth, dataline1), dataline2 in zip(data['success_counts'].items(), - data['total_counts'].values()): - flattened_data['predictions'][pkey]['success_probabilities'] += list( - _np.array(dataline1) / _np.array(dataline2)) - - # Only do this if we've not already stored the success probabilities in the benchamrker. - if ('success_counts' in flattened_data) and ('total_counts' in flattened_data) \ - and ('success_probabilities' not in flattened_data): - if aggregate: - flattened_data['success_probabilities'] = [sc / tc if tc > 0 else _np.nan for sc, - tc in zip(flattened_data['success_counts'], - flattened_data['total_counts'])] - else: - flattened_data['success_probabilities'] = [[sc / tc if tc > 0 else _np.nan for sc, tc in zip( - scpass, tcpass)] for scpass, tcpass in zip(flattened_data['success_counts'], - flattened_data['total_counts'])] - - return flattened_data - - def test_pass_stability(self, formatdata=False, verbosity=1): - - assert(self.multids is not None), \ - "Can only run the stability analysis if a MultiDataSet is contained in this Benchmarker!" - - if not formatdata: - assert('success-fail' in self.multids.keys()), "Must have generated/imported a success-fail format DataSet!" 
- else: - if 'success-fail' not in self.multids.keys(): - if verbosity > 0: - print("No success/fail dataset found, so first creating this dataset from the full data...", end='') - self.generate_success_or_fail_dataset() - if verbosity > 0: - print("complete.") - - if len(self.multids['success-fail']) > 1: - self.dscomparator = _dcomp.DataComparator(self.multids['success-fail'], allow_bad_circuits=True) - self.dscomparator.run(verbosity=verbosity) - - def generate_success_or_fail_dataset(self, overwrite=False): - """ - """ - - assert('standard' in self.multids.keys()) - if not overwrite: - assert('success-fail' not in self.multids.keys()) - - sfmultids = _multids.MultiDataSet() - - for ds_ind, ds in self.multids['standard'].items(): - sfds = _stdds.DataSet(outcome_labels=['success', 'fail'], collision_action=ds.collisionAction) - for circ, dsrow in ds.items(strip_occurrence_tags=True): - try: - scounts = dsrow[dsrow.aux[self.success_key]] - except: - scounts = 0 - tcounts = dsrow.total - sfds.add_count_dict(circ, {'success': scounts, 'fail': tcounts - scounts}, aux=dsrow.aux) - - sfds.done_adding_data() - sfmultids.add_dataset(ds_ind, sfds) - - self.multids['success-fail'] = sfmultids - - # def get_all_data(self): - - # for circ - - def summary_data(self, datatype, specindex, qubits=None): - - spec = self._specs[specindex] - structure = spec.get_structure() - if len(structure) == 1: - if qubits is None: - qubits = structure[0] - - assert(qubits in structure), "Invalid choice of qubits for this spec!" - - return self.pass_summary_data[specindex][qubits][datatype] - - #def getauxillary_data(self, datatype, specindex, qubits=None): - - #def get_predicted_summary_data(self, prediction, datatype, specindex, qubits=None): - - def create_summary_data(self, predictions=None, verbosity=2, auxtypes=None): - """ - todo - """ - if predictions is None: - predictions = dict() - if auxtypes is None: - auxtypes = [] - assert(self.multids is not None), "Cannot generate summary data without a DataSet!" - assert('standard' in self.multids.keys()), "Currently only works for standard dataset!" - useds = 'standard' - # We can't use the success-fail dataset if there's any simultaneous benchmarking. Not in - # it's current format anyway. - - summarydata = {} - aux = {} - globalsummarydata = {} - predsummarydata = {} - predds = None - preddskey = None - for pkey in predictions.keys(): - predsummarydata[pkey] = {} - if isinstance(predictions[pkey], _stdds.DataSet): - assert(predds is None), "Can't have two DataSet predictions!" - predds = predictions[pkey] - preddskey = pkey - else: - assert(isinstance(predictions[pkey], _oplessmodel.SuccessFailModel) - ), "If not a DataSet must be an ErrorRatesModel!" 
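As context for the success/fail conversion performed by the removed generate_success_or_fail_dataset above, the essential operation is collapsing a circuit's outcome counts into a two-outcome dictionary using that circuit's designated success outcome. A minimal sketch with plain dictionaries (not pyGSTi DataSet objects; the helper name is hypothetical):

```python
def to_success_fail(count_dict, success_outcome):
    # Keep the counts for the designated success outcome; everything else is 'fail'.
    success = count_dict.get(success_outcome, 0)
    total = sum(count_dict.values())
    return {'success': success, 'fail': total - success}

# e.g. a two-qubit circuit whose target bitstring is '00'
print(to_success_fail({'00': 90, '01': 6, '10': 3, '11': 1}, '00'))  # {'success': 90, 'fail': 10}
```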
- - datatypes = ['success_counts', 'total_counts', 'hamming_distance_counts', 'success_probabilities'] - if self.dscomparator is not None: - stabdatatypes = ['tvds', 'pvals', 'jsds', 'llrs', 'sstvds'] - else: - stabdatatypes = [] - - #preddtypes = ('success_probabilities', ) - auxtypes = ['twoQgate_count', 'depth', 'target', 'width', 'circuit_index'] + auxtypes - - def _get_datatype(datatype, dsrow, circ, target, qubits): - - if datatype == 'success_counts': - return _analysis.marginalized_success_counts(dsrow, circ, target, qubits) - elif datatype == 'total_counts': - return dsrow.total - elif datatype == 'hamming_distance_counts': - return _analysis.marginalized_hamming_distance_counts(dsrow, circ, target, qubits) - elif datatype == 'success_probabilities': - sc = _analysis.marginalized_success_counts(dsrow, circ, target, qubits) - tc = dsrow.total - if tc == 0: - return _np.nan - else: - return sc / tc - else: - raise ValueError("Unknown data type!") - - numpasses = len(self.multids[useds].keys()) - - for ds_ind in self.multids[useds].keys(): - - if verbosity > 0: - print(" - Processing data from pass {} of {}. Percent complete:".format(ds_ind + 1, - len(self.multids[useds]))) - - #circuits = {} - numcircuits = len(self.multids[useds][ds_ind].keys()) - percent = 0 - - if preddskey is None or ds_ind > 0: - iterator = zip(self.multids[useds][ds_ind].items(strip_occurrence_tags=True), - self.multids[useds].auxInfo.values(), _cycle(zip([None, ], [None, ]))) - else: - iterator = zip(self.multids[useds][ds_ind].items(strip_occurrence_tags=True), - self.multids[useds].auxInfo.values(), - predds.items(strip_occurrence_tags=True)) - - for i, ((circ, dsrow), auxdict, (pcirc, pdsrow)) in enumerate(iterator): - - if pcirc is not None: - if not circ == pcirc: - print('-{}-'.format(i)) - pdsrow = predds[circ] - _warnings.warn("Predicted DataSet is ordered differently to the main DataSet!" - + "Reverting to potentially slow dictionary hashing!") - - if verbosity > 0: - if _np.floor(100 * i / numcircuits) >= percent: - percent += 1 - if percent in (1, 26, 51, 76): - print("\n {},".format(percent), end='') - else: - print("{},".format(percent), end='') - if percent == 100: - print('') - - speckeys = auxdict['spec'] - try: - depth = auxdict['depth'] - except: - depth = auxdict['length'] - target = auxdict['target'] - - if isinstance(speckeys, str): - speckeys = [speckeys] - - for speckey in speckeys: - specind = self._speckeys.index(speckey) - spec = self._specs[specind] - structure = spec.get_structure() - - # If we've not yet encountered this specind, we create the required dictionaries to store the - # summary data from the circuits associated with that spec. - if specind not in summarydata.keys(): - - assert(ds_ind == 0) - summarydata[specind] = {qubits: {datatype: {} - for datatype in datatypes} for qubits in structure} - aux[specind] = {qubits: {auxtype: {} for auxtype in auxtypes} for qubits in structure} - - # Only do predictions on the first pass dataset. 
- for pkey in predictions.keys(): - predsummarydata[pkey][specind] = {} - for pkey in predictions.keys(): - if pkey == preddskey: - predsummarydata[pkey][specind] = {qubits: {datatype: {} for datatype in datatypes} - for qubits in structure} - else: - predsummarydata[pkey][specind] = { - qubits: {'success_probabilities': {}} for qubits in structure} - - globalsummarydata[specind] = {qubits: {datatype: {} - for datatype in stabdatatypes} for qubits in structure} - - # If we've not yet encountered this depth, we create the list where the data for that depth - # is stored. - for qubits in structure: - if depth not in summarydata[specind][qubits][datatypes[0]].keys(): - - assert(ds_ind == 0) - for datatype in datatypes: - summarydata[specind][qubits][datatype][depth] = [[] for i in range(numpasses)] - for auxtype in auxtypes: - aux[specind][qubits][auxtype][depth] = [] - - for pkey in predictions.keys(): - if pkey == preddskey: - for datatype in datatypes: - predsummarydata[pkey][specind][qubits][datatype][depth] = [] - else: - predsummarydata[pkey][specind][qubits]['success_probabilities'][depth] = [] - - for datatype in stabdatatypes: - globalsummarydata[specind][qubits][datatype][depth] = [] - - #print('---', i) - for qubits_ind, qubits in enumerate(structure): - for datatype in datatypes: - x = _get_datatype(datatype, dsrow, circ, target, qubits) - summarydata[specind][qubits][datatype][depth][ds_ind].append(x) - # Only do predictions on the first pass dataset. - if preddskey is not None and ds_ind == 0: - x = _get_datatype(datatype, pdsrow, circ, target, qubits) - predsummarydata[preddskey][specind][qubits][datatype][depth].append(x) - - # Only do predictions and aux on the first pass dataset. - if ds_ind == 0: - for auxtype in auxtypes: - if auxtype == 'twoQgate_count': - auxdata = circ.two_q_gate_count() - elif auxtype == 'depth': - auxdata = circ.depth - elif auxtype == 'target': - auxdata = target - elif auxtype == 'circuit_index': - auxdata = i - elif auxtype == 'width': - auxdata = len(qubits) - else: - auxdata = auxdict.get(auxtype, None) - - aux[specind][qubits][auxtype][depth].append(auxdata) - - for pkey, predmodel in predictions.items(): - if pkey != preddskey: - if set(circ.line_labels) != set(qubits): - trimmedcirc = circ.copy(editable=True) - for q in circ.line_labels: - if q not in qubits: - trimmedcirc.delete_lines(q) - else: - trimmedcirc = circ - - predsp = predmodel.probabilities(trimmedcirc)[('success',)] - predsummarydata[pkey][specind][qubits]['success_probabilities'][depth].append( - predsp) - - for datatype in stabdatatypes: - if datatype == 'tvds': - x = self.dscomparator.tvds.get(circ, _np.nan) - elif datatype == 'pvals': - x = self.dscomparator.pVals.get(circ, _np.nan) - elif datatype == 'jsds': - x = self.dscomparator.jsds.get(circ, _np.nan) - elif datatype == 'llrs': - x = self.dscomparator.llrs.get(circ, _np.nan) - globalsummarydata[specind][qubits][datatype][depth].append(x) - - if verbosity > 0: - print('') - - # Record the data in the object at the end. 
- self.predicted_summary_data = predsummarydata - self.pass_summary_data = summarydata - self.global_summary_data = globalsummarydata - self.aux = aux - - def analyze(self, specindices=None, analysis='adjusted', bootstraps=200, verbosity=1): - """ - todo - - todo: this partly ignores specindices - """ - #self.create_summary_data(specindices=specindices, datatype=analysis, verbosity=verbosity) - - for i, rbdatadict in self._summary_data.items(): - #if not isinstance(rbdata, dict): - # self._rbresults[i] = rb.analysis.std_practice_analysis(rbdata) - #else: - #self._rbresults[i] = {} - #for key in rbdata.items(): - if verbosity > 0: - print('- Running analysis for {} of {}'.format(i, len(self._summary_data))) - self._rbresults['adjusted'][i] = {} - self._rbresults['raw'][i] = {} - for j, (key, rbdata) in enumerate(rbdatadict.items()): - if verbosity > 1: - print(' - Running analysis for qubits {} ({} of {})'.format(key, j, len(rbdatadict))) - if analysis == 'all' or analysis == 'raw': - self._rbresults['raw'][i][key] = _analysis.std_practice_analysis( - rbdata, bootstrap_samples=bootstraps, datatype='raw') - if (analysis == 'all' and rbdata.datatype == 'hamming_distance_counts') or analysis == 'adjusted': - self._rbresults['adjusted'][i][key] = _analysis.std_practice_analysis( - rbdata, bootstrap_samples=bootstraps, datatype='adjusted') - - def filter_experiments(self, numqubits=None, containqubits=None, onqubits=None, sampler=None, - two_qubit_gate_prob=None, prefilter=None, benchmarktype=None): - """ - todo - - """ - - kept = {} - for i, spec in enumerate(self._specs): - structures = spec.get_structure() - for qubits in structures: - - keep = True - - if keep: - if benchmarktype is not None: - if spec.type != benchmarktype: - keep = False - - if keep: - if numqubits is not None: - if len(qubits) != numqubits: - keep = False - - if keep: - if containqubits is not None: - if not set(containqubits).issubset(qubits): - keep = False - - if keep: - if onqubits is not None: - if set(qubits) != set(onqubits): - keep = False - - if keep: - if sampler is not None: - if not spec._sampler == sampler: - keep = False - - if keep: - if two_qubit_gate_prob is not None: - if not _np.allclose(two_qubit_gate_prob, spec.get_twoQgate_rate()): - keep = False - - if keep: - if i not in kept.keys(): - kept[i] = [] - kept[i].append(qubits) - - if prefilter is not None: - dellist = [] - for key in kept.keys(): - if key not in prefilter.keys(): - dellist.append(key) - else: - newlist = [] - for qubits in kept[key]: - if qubits in prefilter[key]: - newlist.append(qubits) - if len(newlist) == 0: - dellist.append(key) - else: - kept[key] = newlist - - for key in dellist: - del kept[key] - - return kept - - # for i, rbdata in self._adjusted_summary_data.items(): - # #if not isinstance(rbdata, dict): - # # self._rbresults[i] = rb.analysis.std_practice_analysis(rbdata) - # #else: - # #self._rbresults[i] = {} - # #for key in rbdata.items(): - # self._adjusted_rbresults[i] = rb.analysis.std_practice_analysis(rbdata, bootstrap_samples=0, - # asymptote=1/4**rbdata.number_of_qubits) - - -# class RBResults(object): -# """ -# An object to contain the results of an RB analysis -# """ - -# def __init__(self, data, rtype, fits): -# """ -# Initialize an RBResults object. - -# Parameters -# ---------- -# data : RBSummaryDataset -# The RB summary data that the analysis was performed for. 
- -# rtype : {'IE','AGI'} -# The type of RB error rate, corresponding to different dimension-dependent -# re-scalings of (1-p), where p is the RB decay constant in A + B*p^m. - -# fits : dict -# A dictionary containing FitResults objects, obtained from one or more -# fits of the data (e.g., a fit with all A, B and p as free parameters and -# a fit with A fixed to 1/2^n). -# """ -# self.data = data -# self.rtype = rtype -# self.fits = fits - -# def plot(self, fitkey=None, decay=True, success_probabilities=True, size=(8, 5), ylim=None, xlim=None, -# legend=True, title=None, figpath=None): -# """ -# Plots RB data and, optionally, a fitted exponential decay. - -# Parameters -# ---------- -# fitkey : dict key, optional -# The key of the self.fits dictionary to plot the fit for. If None, will -# look for a 'full' key (the key for a full fit to A + Bp^m if the standard -# analysis functions are used) and plot this if possible. It otherwise checks -# that there is only one key in the dict and defaults to this. If there are -# multiple keys and none of them are 'full', `fitkey` must be specified when -# `decay` is True. - -# decay : bool, optional -# Whether to plot a fit, or just the data. - -# success_probabilities : bool, optional -# Whether to plot the success probabilities distribution, as a violin plot. (as well -# as the *average* success probabilities at each length). - -# size : tuple, optional -# The figure size - -# ylim, xlim : tuple, optional -# The x and y limits for the figure. - -# legend : bool, optional -# Whether to show a legend. - -# title : str, optional -# A title to put on the figure. - -# figpath : str, optional -# If specified, the figure is saved with this filename. -# """ - -# # Future : change to a plotly plot. -# try: import matplotlib.pyplot as _plt -# except ImportError: raise ValueError("This function requires you to install matplotlib!") - -# if decay and fitkey is None: -# allfitkeys = list(self.fits.keys()) -# if 'full' in allfitkeys: fitkey = 'full' -# else: -# assert(len(allfitkeys) == 1), \ -# "There are multiple fits and none have the key 'full'. Please specify the fit to plot!" 
-# fitkey = allfitkeys[0] - -# _plt.figure(figsize=size) -# _plt.plot(self.data.lengths, self.data.ASPs, 'o', label='Average success probabilities') - -# if decay: -# lengths = _np.linspace(0, max(self.data.lengths), 200) -# A = self.fits[fitkey].estimates['A'] -# B = self.fits[fitkey].estimates['B'] -# p = self.fits[fitkey].estimates['p'] -# _plt.plot(lengths, A + B * p**lengths, -# label='Fit, r = {:.2} +/- {:.1}'.format(self.fits[fitkey].estimates['r'], -# self.fits[fitkey].stds['r'])) - -# if success_probabilities: -# _plt.violinplot(list(self.data.success_probabilities), self.data.lengths, points=10, widths=1., -# showmeans=False, showextrema=False, showmedians=False) # , label='Success probabilities') - -# if title is not None: _plt.title(title) -# _plt.ylabel("Success probability") -# _plt.xlabel("RB sequence length $(m)$") -# _plt.ylim(ylim) -# _plt.xlim(xlim) - -# if legend: _plt.legend() - -# if figpath is not None: _plt.savefig(figpath, dpi=1000) -# else: _plt.show() - -# return diff --git a/pygsti/extras/rb/dataset.py b/pygsti/extras/rb/dataset.py deleted file mode 100644 index 249c665df..000000000 --- a/pygsti/extras/rb/dataset.py +++ /dev/null @@ -1,378 +0,0 @@ -""" Encapsulates RB results and dataset objects """ -#*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). -# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights -# in this software. -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. -#*************************************************************************************************** - -import copy as _copy - -import numpy as _np - -#from . import analysis as _analysis # Doesn't exist! 
-_analysis = None # TODO - fix or remove this dependency - - -def create_summary_datasets(ds, spec, datatype='adjusted', verbosity=1): - """ - todo - - """ - structure = spec.get_structure() - circuits = spec.get_circuits() - lengths = list(circuits.keys()) - lengths.sort() - - success_counts = {} - total_counts = {} - hamming_distance_counts = {} - - for qubits in structure: - - if datatype == 'raw': - success_counts[qubits] = {} - total_counts[qubits] = {} - hamming_distance_counts[qubits] = None - - elif datatype == 'adjusted': - success_counts[qubits] = None - total_counts[qubits] = None - hamming_distance_counts[qubits] = {} - - else: - raise ValueError("Requested data type ` {} ` not understood!".format(datatype)) - - if verbosity == 1: - tab = ' ' - if verbosity > 1: - tab = ' ' - - for mit, (m, circuitlist) in enumerate(circuits.items()): - - if verbosity > 0: - print(tab + "- Processing length {} of {}".format(mit + 1, len(circuits))) - - for qubits in structure: - if datatype == 'raw': - success_counts[qubits][m] = [] - total_counts[qubits][m] = [] - elif datatype == 'adjusted': - hamming_distance_counts[qubits][m] = [] - - for (circ, target) in circuitlist: - dsrow = ds[circ] - for qubits in structure: - if datatype == 'raw': - success_counts[qubits][m].append(_analysis.marginalized_success_counts(dsrow, circ, target, qubits)) - total_counts[qubits][m].append(dsrow.total) - elif datatype == 'adjusted': - hamming_distance_counts[qubits][m].append( - _analysis.marginalized_hamming_distance_counts(dsrow, circ, target, qubits)) - - summary_data = {} - for qubits in structure: - #print(success_counts[qubits]) - #print(total_counts[qubits]) - #print(hamming_distance_counts[qubits]) - summary_data[qubits] = RBSummaryDataset(len(qubits), success_counts=success_counts[qubits], - total_counts=total_counts[qubits], - hamming_distance_counts=hamming_distance_counts[qubits]) - - return summary_data - - -class RBSummaryDataset(object): - """ - An object to summarize the results of RB experiments as relevant to implementing a standard RB analysis on the data. - This dataset type only records the "RB length" of a circuit, how many times the circuit resulted in "success", and, - optionally, some basic circuit information that can be helpful in understandingthe results. I.e., it doesn't - store all the details about the circuits and the counts for each circuit (use a standard DataSet object to store - the entire output of RB experiments). - """ - - def __init__(self, num_qubits, success_counts=None, total_counts=None, hamming_distance_counts=None, - aux=None, finitecounts=True, descriptor=''): - """ - # todo : update. - - Initialize an RB summary dataset. - - Parameters - ---------- - num_qubits : int - The number of qubits the dataset is for. This should be the number of qubits the RB experiments where - "holistically" performed on. So, this dataset type is not suitable for, e.g., a *full* set of simultaneous - RB data, which consists of parallel RB on different qubits. Data of that sort can be input into - multiple RBSummaryDataset objects. - - lengths : list of ints - A list of the "RB lengths" that the data is for. I.e., these are the "m" values in Pm = A + Bp^m. - E.g., for direct RB this should be the number of circuit layers of native gates in the "core" circuit - (i.e., not including the prep/measure stabilizer circuits). 
For Clifford RB this should be the number of - Cliffords in the circuit (+ an arbitrary constant, traditionally -1, but -2 is more consistent with - direct RB and is the pyGSTi convention for generating CRB circuits) *before* it is compiled into the - native gates. This can always be the length value used to generate the circuit, if a pyGSTi RB - circuit/experiment generation function was used to generate the circuit. - - This list should be the same length as the input results data (e.g., `success_counts` below). If - `sortedinput` is False (the default), it is a list that has an entry for each circuit run (so values - can appear multiple times in the list and in any order). If `sortedinput` is True is an ordered list - containing each and every RB length once. - - success_counts : list of ints, or list of list of ints, optional - Success counts, i.e., the number of times a circuit returns the "success" result. Normally this - should be a list containing ints with `success_counts[i]` containing the success counts for a circuit - with RB length `length[i]`. This is the case when `sortedinput` is False. But, if `sortedinput` is - True, it is instead a list of lists of ints: the list at `success_counts[i]` contains the data for - all circuits with RB length `lengths[i]` (in this case `lengths` is an ordered list containing each - RB length once). `success_counts` can be None, and the data can instead be specified via - `success_probabilities`. But, inputing the data as success counts is the preferred option for - experimental data. - - total_counts : int, or list of ints, or list of list of ints, optional - If not None, an int that specifies the total number of counts per circuit *or* a list that specifies - the total counts for each element in success_counts (or success_probabilities). This is *not* optional - if success_counts is provided, and should always be specified with experimental data. - - success_probabilities : list of floats, or list of list of floats, optional - The same as `success_counts` except that this list specifies observed survival probabilities, rather - than the number of success counts. Can only be specified if `success_counts` is None, and it is better - to input experimental data as `success_counts` (but this option is useful for finite-sampling-free - simulated data). - - circuit_depths : list of ints, or list of list of ints, optional - Has same format has `success_counts` or `success_probabilities`. Contains circuit depths. This is - additional auxillary information that it is often useful to have when analyzing data from any type - of RB that includes any compilation (e.g., Clifford RB). But this is not essential. - - circuit_twoQgate_counts : list of ints, or list of list of ints, optional - Has same format has `success_counts` or `success_probabilities`. Contains circuit 2-qubit gate counts. - This is additional auxillary information that it is often useful for interpretting RB results. - - descriptor : str, optional - A string that describes what the data is for. - - """ - if aux is None: - aux = {} - self.num_qubits = num_qubits - self.finitecounts = finitecounts - self.aux = _copy.deepcopy(aux) - self.descriptor = descriptor - - assert(not (success_counts is not None and hamming_distance_counts is not None)), "Only one data " + \ - "type should be provided!" 
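For orientation, the decay model referenced in these removed docstrings (and fit by the removed analysis/plotting code) is the standard RB form P_m = A + B * p**m, where m is the RB length. A minimal evaluation sketch under assumed parameter values:

```python
import numpy as np

def rb_decay(m, A, B, p):
    # Average success probability at RB length m under the A + B * p**m model.
    return A + B * p**m

lengths = np.array([0, 2, 4, 8, 16])
print(rb_decay(lengths, A=0.5, B=0.5, p=0.98))  # decays from 1.0 toward the asymptote A
```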
- - if success_counts is not None: - - self.datatype = 'success_counts' - self.counts = _copy.deepcopy(success_counts) - if self.finitecounts: - assert(total_counts is not None), "The total counts per circuit is required!" - self._total_counts = _copy.deepcopy(total_counts) - else: - self._total_counts = 1 - - elif hamming_distance_counts is not None: - - self.datatype = 'hamming_distance_counts' - self.counts = _copy.deepcopy(hamming_distance_counts) - - assert(total_counts is None), "The total counts per circuit should not be provided, " + \ - "as it is implicit in the Hamming distance data!" - - if self.finitecounts: - # For Hamming distance data we just compute total counts on the fly. - self._total_counts = None - else: - self._total_counts = 1 - - else: - raise ValueError("No data provided! `success_counts` or `hamming_distance_counts` must be not None!") - - lengths = list(self.counts.keys()) - lengths.sort() - self.lengths = lengths - - # Generate "standard" and "adjusted" success probabilities - - self.SPs = [] - self.ASPs = [] - for l in self.lengths: - SPs = [self.success_counts(l, i) / self.total_counts(l, i) for i in range(len(self.counts[l]))] - self.SPs.append(SPs) - self.ASPs.append(_np.mean(SPs)) - - if self.datatype == 'hamming_distance_counts': - self.adjusted_SPs = [] - self.adjusted_ASPs = [] - for l in self.lengths: - adjSPs = [self.adjusted_success_probability(l, i) for i in range(len(self.counts[l]))] - self.adjusted_SPs.append(adjSPs) - self.adjusted_ASPs.append(_np.mean(adjSPs)) - - else: - self.adjusted_SPs = None - self.adjusted_ASPs = None - - self.bootstraps = [] - - return - - def adjusted_success_probability(self, length, index): - """ - todo. - """ - return _analysis.adjusted_success_probability(self.hamming_distance_distribution(length, index)) - - def success_counts(self, length, index): - """ - todo - - """ - if self.datatype == 'success_counts': - return self.counts[length][index] - - else: - return self.counts[length][index][0] - - def total_counts(self, length, index): - """ - todo - - """ - if isinstance(self._total_counts, int): - return self._total_counts - - elif self._total_counts is None: - return _np.sum(self.counts[length][index]) - - else: - return self._total_counts[length][index] - - def hamming_distance_distribution(self, length, index): - """ - todo - - """ - if self.datatype == 'hamming_distance_counts': - return self.counts[length][index] / _np.sum(self.counts[length][index]) - - else: - raise ValueError("This is only possible for Hamming distance count data!") - - def success_probabilities(self, successtype='raw'): - """ - todo. - - """ - if successtype == 'raw': - return self.lengths, self.ASPs, self.SPs - - elif successtype == 'adjusted': - return self.lengths, self.adjusted_ASPs, self.adjusted_SPs - - def add_bootstrapped_datasets(self, samples=1000): - """ - Adds bootstrapped data. The bootstrap is over both the finite counts of each - circuit and over the circuits at each length. - - Parameters - ---------- - samples : int, optional - The number of bootstrapped data to construct. - - Returns - ------- - None - """ - for i in range(len(self.bootstraps), samples): - - # A new set of bootstrapped success counts, or Hamming distance counts. 
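As a rough illustration of the nonparametric bootstrap described in the removed add_bootstrapped_datasets docstring above (resample circuits with replacement at each length, then redraw finite counts), here is a minimal sketch for a single RB length; the function name and inputs are hypothetical:

```python
import numpy as np

def bootstrap_success_counts(success_probs, total_counts, rng=None):
    # One bootstrap resample: pick circuits with replacement, then redraw
    # each circuit's success counts from a binomial with its observed probability.
    rng = np.random.default_rng() if rng is None else rng
    n = len(success_probs)
    return [rng.binomial(total_counts[k], success_probs[k])
            for k in rng.integers(n, size=n)]

print(bootstrap_success_counts([0.9, 0.8, 0.85], [100, 100, 100]))
```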
- if self.datatype == 'success_counts': - - success_counts = {} - hamming_distance_counts = None - total_counts = {} - - for j, l in enumerate(self.lengths): - - success_counts[l] = [] - if self.finitecounts: - total_counts[l] = [] - else: - total_counts = None - numcircuits = len(self.SPs[j]) - - for k in range(numcircuits): - - ind = _np.random.randint(numcircuits) - sampledSP = self.SPs[j][ind] - totalcounts = self.total_counts(l, ind) - if self.finitecounts: - success_counts[l].append(_np.random.binomial(totalcounts, sampledSP)) - total_counts[l].append(totalcounts) - else: - success_counts[l].append(sampledSP) - - else: - - success_counts = None - hamming_distance_counts = {} - total_counts = None - - for j, l in enumerate(self.lengths): - - hamming_distance_counts[l] = [] - numcircuits = len(self.SPs[j]) - - for k in range(numcircuits): - - ind = _np.random.randint(numcircuits) - sampledHDProbs = self.hamming_distance_distribution(l, ind) - - if self.finitecounts: - totalcounts = self.total_counts(l, ind) - hamming_distance_counts[l].append(list(_np.random.multinomial(totalcounts, sampledHDProbs))) - else: - hamming_distance_counts[l].append(sampledHDProbs) - - bootstrapped_dataset = RBSummaryDataset(self.num_qubits, success_counts, total_counts, - hamming_distance_counts, finitecounts=self.finitecounts, - descriptor='data created from a non-parametric bootstrap') - - self.bootstraps.append(bootstrapped_dataset) - - # todo : add this back in. - # def create_smaller_dataset(self, numberofcircuits): - # """ - # Creates a new dataset that has discarded the data from all but the first `numberofcircuits` - # circuits at each length. - - # Parameters - # ---------- - # numberofcircuits : int - # The maximum number of circuits to keep at each length. - - # Returns - # ------- - # RBSummaryDataset - # A new dataset containing less data. - # """ - # newRBSdataset = _copy.deepcopy(self) - # for i in range(len(newRBSdataset.lengths)): - # if newRBSdataset.success_counts is not None: - # newRBSdataset.success_counts[i] = newRBSdataset.success_counts[i][:numberofcircuits] - # if newRBSdataset.success_probabilities is not None: - # newRBSdataset.success_probabilities[i] = newRBSdataset.success_probabilities[i][:numberofcircuits] - # if newRBSdataset.total_counts is not None: - # newRBSdataset.total_counts[i] = newRBSdataset.total_counts[i][:numberofcircuits] - # if newRBSdataset.circuit_depths is not None: - # newRBSdataset.circuit_depths[i] = newRBSdataset.circuit_depths[i][:numberofcircuits] - # if newRBSdataset.circuit_twoQgate_counts is not None: - # newRBSdataset.circuit_twoQgate_counts[i] = newRBSdataset.circuit_twoQgate_counts[i][:numberofcircuits] - - # return newRBSdataset diff --git a/pygsti/extras/rb/io.py b/pygsti/extras/rb/io.py deleted file mode 100644 index d1452494c..000000000 --- a/pygsti/extras/rb/io.py +++ /dev/null @@ -1,814 +0,0 @@ -#*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). -# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights -# in this software. -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
-#*************************************************************************************************** - -import ast as _ast -import json as _json -import os as _os -import pickle as _pickle -import warnings as _warnings - -from pygsti.extras.rb import benchmarker as _benchmarker -from pygsti.extras.rb import dataset as _dataset -# todo : update -from pygsti.extras.rb import sample as _sample -from pygsti import io as _io -from pygsti.circuits import circuit as _cir -from pygsti.data import multidataset as _mds - - -#def load_benchmarking_data(basedir): - -def load_benchmarker(directory, load_datasets=True, verbosity=1): - """ - - """ - with open(directory + '/global.txt', 'r') as f: - globaldict = _json.load(f) - - numpasses = globaldict['numpasses'] - speckeys = globaldict['speckeys'] - success_key = globaldict['success_key'] - success_outcome = globaldict['success_outcome'] - dscomparator = globaldict['dscomparator'] - - if load_datasets: - dskeys = [dskey.name for dskey in _os.scandir(directory + '/data') if dskey.is_dir()] - multidsdict = {dskey: _mds.MultiDataSet()for dskey in dskeys} - - for dskey in dskeys: - for passnum in range(numpasses): - dsfn = directory + '/data/{}/ds{}.txt'.format(dskey, passnum) - ds = _io.read_dataset(dsfn, collision_action='keepseparate', record_zero_counts=False, - ignore_zero_count_lines=False, verbosity=verbosity) - multidsdict[dskey].add_dataset(passnum, ds) - else: - multidsdict = None - - specs = {} - for i, speckey in enumerate(speckeys): - specs[speckey] = load_benchmarkspec(directory + '/specs/{}.txt'.format(i)) - - summary_data = {'global': {}, 'pass': {}, 'aux': {}} - predictionkeys = [pkey.name for pkey in _os.scandir(directory + '/predictions') if pkey.is_dir()] - predicted_summary_data = {pkey: {} for pkey in predictionkeys} - - for i, spec in enumerate(specs.values()): - - summary_data['pass'][i] = {} - summary_data['global'][i] = {} - summary_data['aux'][i] = {} - for pkey in predictionkeys: - predicted_summary_data[pkey][i] = {} - - structure = spec.get_structure() - - for j, qubits in enumerate(structure): - - # Import the summary data for that spec and qubit subset - with open(directory + '/summarydata/{}-{}.txt'.format(i, j), 'r') as f: - sd = _json.load(f) - summary_data['pass'][i][qubits] = {} - for dtype, data in sd['pass'].items(): - summary_data['pass'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()} - summary_data['global'][i][qubits] = {} - for dtype, data in sd['global'].items(): - summary_data['global'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()} - - # Import the auxillary data - with open(directory + '/aux/{}-{}.txt'.format(i, j), 'r') as f: - aux = _json.load(f) - summary_data['aux'][i][qubits] = {} - for dtype, data in aux.items(): - summary_data['aux'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()} - - # Import the predicted summary data for that spec and qubit subset - for pkey in predictionkeys: - with open(directory + '/predictions/{}/summarydata/{}-{}.txt'.format(pkey, i, j), 'r') as f: - psd = _json.load(f) - predicted_summary_data[pkey][i][qubits] = {} - for dtype, data in psd.items(): - predicted_summary_data[pkey][i][qubits][dtype] = { - int(key): value for (key, value) in data.items()} - - benchmarker = _benchmarker.Benchmarker(specs, ds=multidsdict, summary_data=summary_data, - predicted_summary_data=predicted_summary_data, - dstype='dict', success_outcome=success_outcome, - success_key=success_key, dscomparator=dscomparator) - - return 
benchmarker - - -def write_benchmarker(benchmarker, outdir, overwrite=False, verbosity=0): - - try: - _os.makedirs(outdir) - if verbosity > 0: - print(" - Created `" + outdir + "` folder to store benchmarker in txt format.") - except: - if overwrite: - if verbosity > 0: - print(" - `" + outdir + "` folder already exists. Will write data into that folder.") - else: - raise ValueError("Directory already exists! Set overwrite to True or change the directory name!") - - globaldict = {} - globaldict['speckeys'] = benchmarker._speckeys - globaldict['numpasses'] = benchmarker.numpasses - globaldict['success_outcome'] = benchmarker.success_outcome - globaldict['success_key'] = benchmarker.success_key - - if benchmarker.dscomparator is not None: - - globaldict['dscomparator'] = {} - globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold - globaldict['dscomparator']['llr_pseudothreshold'] = benchmarker.dscomparator.llr_pseudothreshold - globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold - globaldict['dscomparator']['jsd_pseudothreshold'] = benchmarker.dscomparator.jsd_pseudothreshold - globaldict['dscomparator']['aggregate_llr'] = benchmarker.dscomparator.aggregate_llr - globaldict['dscomparator']['aggregate_llr_threshold'] = benchmarker.dscomparator.aggregate_llr_threshold - globaldict['dscomparator']['aggregate_nsigma'] = benchmarker.dscomparator.aggregate_nsigma - globaldict['dscomparator']['aggregate_nsigma_threshold'] = benchmarker.dscomparator.aggregate_nsigma_threshold - globaldict['dscomparator']['aggregate_pVal'] = benchmarker.dscomparator.aggregate_pVal - globaldict['dscomparator']['aggregate_pVal_threshold'] = benchmarker.dscomparator.aggregate_pVal_threshold - globaldict['dscomparator']['inconsistent_datasets_detected'] = \ - benchmarker.dscomparator.inconsistent_datasets_detected - globaldict['dscomparator']['number_of_significant_sequences'] = int( - benchmarker.dscomparator.number_of_significant_sequences) - globaldict['dscomparator']['significance'] = benchmarker.dscomparator.significance - - else: - globaldict['dscomparator'] = None - - # Write global details to file - with open(outdir + '/global.txt', 'w') as f: - _json.dump(globaldict, f, indent=4) - - _os.makedirs(outdir + '/specs') - _os.makedirs(outdir + '/summarydata') - _os.makedirs(outdir + '/aux') - - for pkey in benchmarker.predicted_summary_data.keys(): - _os.makedirs(outdir + '/predictions/{}/summarydata'.format(pkey)) - - for i, spec in enumerate(benchmarker._specs): - structure = spec.get_structure() - write_benchmarkspec(spec, outdir + '/specs/{}.txt'.format(i), warning=0) - - for j, qubits in enumerate(structure): - summarydict = {'pass': benchmarker.pass_summary_data[i][qubits], - 'global': benchmarker.global_summary_data[i][qubits] - } - fname = outdir + '/summarydata/' + '{}-{}.txt'.format(i, j) - with open(fname, 'w') as f: - _json.dump(summarydict, f, indent=4) - - aux = benchmarker.aux[i][qubits] - fname = outdir + '/aux/' + '{}-{}.txt'.format(i, j) - with open(fname, 'w') as f: - _json.dump(aux, f, indent=4) - - for pkey in benchmarker.predicted_summary_data.keys(): - summarydict = benchmarker.predicted_summary_data[pkey][i][qubits] - fname = outdir + '/predictions/{}/summarydata/'.format(pkey) + '{}-{}.txt'.format(i, j) - with open(fname, 'w') as f: - _json.dump(summarydict, f, indent=4) - - for dskey in benchmarker.multids.keys(): - fdir = outdir + '/data/{}'.format(dskey) - _os.makedirs(fdir) - for dsind in 
benchmarker.multids[dskey].keys(): - fname = fdir + '/ds{}.txt'.format(dsind) - _io.write_dataset(fname, benchmarker.multids[dskey][dsind], fixed_column_mode=False) - - -def create_benchmarker(dsfilenames, predictions=None, test_stability=True, auxtypes=None, verbosity=1): - if predictions is None: - predictions = dict() - if auxtypes is None: - auxtypes = [] - benchmarker = load_data_into_benchmarker(dsfilenames, verbosity=verbosity) - if test_stability: - if verbosity > 0: - print(" - Running stability analysis...", end='') - benchmarker.test_pass_stability(formatdata=True, verbosity=0) - if verbosity > 0: - print("complete.") - - benchmarker.create_summary_data(predictions=predictions, auxtypes=auxtypes) - - return benchmarker - -# Todo : just make this and create_benchmarker a single function? This import has been superceded -# by load_benchmarker - - -def load_data_into_benchmarker(dsfilenames=None, summarydatasets_filenames=None, summarydatasets_folder=None, - predicted_summarydatasets_folders=None, verbosity=1): - """ - todo - - """ - if predicted_summarydatasets_folders is None: - predicted_summarydatasets_folders = dict() - elif len(predicted_summarydatasets_folders) > 0: - assert(summarydatasets_folder is not None) - #if len(predicted_summarydatasets_folders) > 1: - # raise NotImplementedError("This is not yet supported!") - - if dsfilenames is not None: - - # If it is a filename, then we import the dataset from file. - if isinstance(dsfilenames, str): - dsfilenames = [dsfilenames, ] - elif not isinstance(dsfilenames, list): - raise ValueError("dsfilenames must be a str or a list of strings!") - - mds = _mds.MultiDataSet() - for dsfn_ind, dsfn in enumerate(dsfilenames): - - if dsfn[-4:] == '.txt': - print(dsfn) - mds.add_dataset(dsfn_ind, _io.read_dataset(dsfn, - collision_action='keepseparate', - record_zero_counts=False, - ignore_zero_count_lines=False, - verbosity=verbosity)) - - elif dsfn[-4:] == '.pkl': - - if verbosity > 0: - print(" - Loading DataSet from pickle file...", end='') - with open(dsfn, 'rb') as f: - mds.add_dataset(dsfn_ind, _pickle.load(f)) - if verbosity > 0: - print("complete.") - - else: - raise ValueError("File must end in .pkl or .txt!") - - # # If it isn't a string, we assume that `dsfilenames` is a DataSet. - # else: - - # ds = dsfilenames - - if verbosity > 0: print(" - Extracting metadata from the DataSet...", end='') - - # To store the aux information about the RB experiments. - all_spec_filenames = [] - # circuits_for_specfile = {} - # outdslist = [] - - # We go through the dataset and extract all the necessary auxillary information. - for circ in mds[mds.keys()[0]].keys(): - - # The spec filename or names for this circuits - specfns_forcirc = mds.auxInfo[circ]['spec'] - # The RB length for this circuit - # try: - # l = mds.auxInfo[circ]['depth'] - # except: - # l = mds.auxInfo[circ]['length'] - # The target bitstring for this circuit. - # target = mds.auxInfo[circ]['target'] - - # This can be a string (a single spec filename) or a list, so make always a list. - if isinstance(specfns_forcirc, str): - specfns_forcirc = [specfns_forcirc, ] - - for sfn_forcirc in specfns_forcirc: - # If this is the first instance of seeing this filename then... - if sfn_forcirc not in all_spec_filenames: - # ... we store it in the list of all spec filenames to import later. - all_spec_filenames.append(sfn_forcirc) - # And it won't yet be a key in the circuits_for_specfile dict, so we add it. 
- # circuits_for_specfile[sfn_forcirc] = {} - - # # If we've not yet had this length for that spec filename, we add that as a key. - # if l not in circuits_for_specfile[sfn_forcirc].keys(): - # circuits_for_specfile[sfn_forcirc][l] = [] - - # # We add the circuit and target output to the dict for the corresponding spec files. - # circuits_for_specfile[sfn_forcirc][l].append((circ, target)) - - # circ_specindices = [] - # for sfn_forcirc in specfns_forcirc: - # circ_specindices.append(all_spec_filenames.index(sfn_forcirc)) - - if verbosity > 0: - print("complete.") - print(" - Reading in the metadata from the extracted filenames...", end='') - - # We put RB specs that we create via file import (and the circuits above) into this dict - rbspecdict = {} - - # We look for spec files in the same directory as the datafiles, so we find what that is. - # THIS REQUIRES ALL THE FILES TO BE IN THE SAME DIRECTORY - directory = dsfilenames[0].split('/') - directory = '/'.join(directory[: -1]) - if len(directory) > 0: - directory += '/' - - for specfilename in all_spec_filenames: - - # Import the RB spec file. - rbspec = load_benchmarkspec(directory + specfilename) - # Add in the circuits that correspond to each spec, extracted from the dataset. - # rbspec.add_circuits(circuits_for_specfile[specfilename]) - # Record the spec in a list, to be given to an RBAnalyzer object. - rbspecdict[specfilename] = rbspec - - if verbosity > 0: - print("complete.") - print(" - Recording all of the data in a Benchmarker...", end='') - - # Put everything into an RBAnalyzer object, which is a container for RB data, and return this. - benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=mds, summary_data=None) - - if verbosity > 0: print("complete.") - - return benchmarker - - elif (summarydatasets_filenames is not None) or (summarydatasets_folder is not None): - - rbspecdict = {} - - # If a dict, its just the keys of the dict that are the rbspec file names. - if summarydatasets_filenames is not None: - - specfiles = list(summarydatasets_filenames.keys()) - - # If a folder, we look for files in that folder with the standard name format. 
- elif summarydatasets_folder is not None: - specfiles = [] - specfilefound = True - i = 0 - while specfilefound: - try: - filename = summarydatasets_folder + "/spec{}.txt".format(i) - with open(filename, 'r') as f: - if verbosity > 0: - print(filename + " found") - specfiles.append(filename) - i += 1 - except: - specfilefound = False - if verbosity > 0: - print(filename + " not found so terminating spec file search.") - - for sfn_ind, specfilename in enumerate(specfiles): - - rbspec = load_benchmarkspec(specfilename) - rbspecdict[sfn_ind] = rbspec - - summary_data = {} - predicted_summary_data = {pkey: {} for pkey in predicted_summarydatasets_folders.keys()} - - for i, (specfilename, rbspec) in enumerate(zip(specfiles, rbspecdict.values())): - - structure = rbspec.get_structure() - summary_data[i] = {} - for pkey in predicted_summarydatasets_folders.keys(): - predicted_summary_data[pkey][i] = {} - - if summarydatasets_filenames is not None: - sds_filenames = summarydatasets_filenames[specfilename] - elif summarydatasets_folder is not None: - sds_filenames = [summarydatasets_folder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))] - predsds_filenames_dict = {} - for pkey, pfolder in predicted_summarydatasets_folders.items(): - predsds_filenames_dict[pkey] = [pfolder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))] - - for sdsfn, qubits in zip(sds_filenames, structure): - summary_data[i][qubits] = import_rb_summary_data(sdsfn, len(qubits), verbosity=verbosity) - - for pkey, predsds_filenames in predsds_filenames_dict.items(): - for sdsfn, qubits in zip(predsds_filenames, structure): - predicted_summary_data[pkey][i][qubits] = import_rb_summary_data( - sdsfn, len(qubits), verbosity=verbosity) - - benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=None, summary_data=summary_data, - predicted_summary_data=predicted_summary_data) - - return benchmarker - - else: - raise ValueError("Either a filename for a DataSet or filenames for a set of RBSpecs " - + "and RBSummaryDatasets must be provided!") - - -def load_benchmarkspec(filename, circuitsfilename=None): - """ - todo - - """ - #d = {} - with open(filename) as f: - d = _json.load(f) - # for line in f: - # if len(line) > 0 and line[0] != '#': - # line = line.strip('\n') - # line = line.split(' ', 1) - # try: - # d[line[0]] = _ast.literal_eval(line[1]) - # except: - # d[line[0]] = line[1] - - #assert(d.get('type', None) == 'rb'), "This is for importing RB specs!" - - try: - rbtype = d['type'] - except: - raise ValueError("Input file does not contain a line specifying the RB type!") - assert(isinstance(rbtype, str)), "The RB type (specified as rbtype) must be a string!" - - try: - structure = d['structure'] - except: - raise ValueError("Input file does not contain a line specifying the structure!") - if isinstance(structure, list): - structure = tuple([tuple(qubits) for qubits in structure]) - assert(isinstance(structure, tuple)), "The structure must be a tuple!" - - try: - sampler = d['sampler'] - except: - raise ValueError("Input file does not contain a line specifying the circuit layer sampler!") - assert(isinstance(sampler, str)), "The sampler name must be a string!" - - samplerargs = d.get('samplerargs', None) - depths = d.get('depths', None) - numcircuits = d.get('numcircuits', None) - subtype = d.get('subtype', None) - - if samplerargs is not None: - assert(isinstance(samplerargs, dict)), "The samplerargs must be a dict!" 
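As an illustration only (not part of the diff; the field values are hypothetical), a JSON spec file that would pass the checks in `load_benchmarkspec` might look like this:

    # Contents of a hypothetical spec file, e.g. spec0.txt:
    #
    # {
    #     "type": "drb",
    #     "structure": [["Q0", "Q1"], ["Q2", "Q3"]],
    #     "sampler": "edgegrab",
    #     "samplerargs": {"twoQprob": 0.25},
    #     "depths": [0, 2, 4, 8, 16],
    #     "numcircuits": 10,
    #     "subtype": null
    # }
    #
    # 'type', 'structure' and 'sampler' are required; the remaining keys are optional and
    # default to None.  On import, 'structure' is converted to a tuple of tuples of qubit labels.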
- - if depths is not None: - assert(isinstance(depths, list) or isinstance(depths, tuple)), "The depths must be a list or tuple!" - - if numcircuits is not None: - assert(isinstance(numcircuits, list) or isinstance(numcircuits, int)), "numcircuits must be an int or list!" - - spec = _sample.BenchmarkSpec(rbtype, structure, sampler, samplerargs, depths=depths, - numcircuits=numcircuits, subtype=subtype) - - return spec - - -def write_benchmarkspec(spec, filename, circuitsfilename=None, warning=1): - """ - todo - - """ - if spec.circuits is not None: - if circuitsfilename is not None: - circuitlist = [circ for sublist in [spec.circuits[l] for l in spec.depths] for circ in sublist] - _io.write_circuit_list(circuitsfilename, circuitlist) - elif warning > 0: - _warnings.warn("The circuits recorded in this RBSpec are not being written to file!") - - # with open(filename, 'w') as f: - # f.write('type rb\n') - # f.write('rbtype ' + rbspec._rbtype + '\n') - # f.write('structure ' + str(rbspec._structure) + '\n') - # f.write('sampler ' + rbspec._sampler + '\n') - # f.write('lengths ' + str(rbspec._lengths) + '\n') - # f.write('numcircuits ' + str(rbspec._numcircuits) + '\n') - # f.write('rbsubtype ' + str(rbspec._rbsubtype) + '\n') - # f.write('samplerargs ' + str(rbspec._samplerargs) + '\n') - - specdict = spec.to_dict() - del specdict['circuits'] # Don't write the circuits to this file. - - with open(filename, 'w') as f: - _json.dump(specdict, f, indent=4) - - -def import_rb_summary_data(filename, numqubits, datatype='auto', verbosity=1): - """ - todo - - """ - try: - with open(filename, 'r') as f: - if verbosity > 0: print("Importing " + filename + "...", end='') - except: - raise ValueError("Date import failed! File does not exist or the format is incorrect.") - - aux = [] - descriptor = '' - # Work out the type of data we're importing - with open(filename, 'r') as f: - for line in f: - - if (len(line) == 0 or line[0] != '#'): break - - elif line.startswith("# "): - descriptor += line[2:] - - elif line.startswith("## "): - - line = line.strip('\n') - line = line.split(' ') - del line[0] - - if line[0:2] == ['rblength', 'success_probabilities']: - - auxind = 2 - if datatype == 'auto': - datatype = 'success_probabilities' - else: - assert(datatype == 'success_probabilities'), "The data format appears to be " + \ - "success probabilities!" - - elif line[0:3] == ['rblength', 'success_counts', 'total_counts']: - - auxind = 3 - if datatype == 'auto': - datatype = 'success_counts' - else: - assert(datatype == 'success_counts'), "The data format appears to be success counts!" - - elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}c'.format(i) for i in range(numqubits + 1)]: - - auxind = numqubits + 2 - if datatype == 'auto': - datatype = 'hamming_distance_counts' - else: - assert(datatype == 'hamming_distance_counts'), "The data format appears to be Hamming " + \ - "distance counts!" - - elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}p'.format(i) for i in range(numqubits + 1)]: - - auxind = numqubits + 2 - if datatype == 'auto': - datatype = 'hamming_distance_probabilities' - else: - assert(datatype == 'hamming_distance_probabilities'), "The data format appears to be " + \ - "Hamming distance probabilities!" 
- - else: - raise ValueError("Invalid file format!") - - if len(line) > auxind: - assert(line[auxind] == '#') - if len(line) > auxind + 1: - auxlabels = line[auxind + 1:] - else: - auxlabels = [] - - break - - # Prepare an aux dict to hold any auxillary data - aux = {key: {} for key in auxlabels} - - # Read in the data, using a different parser depending on the data type. - if datatype == 'success_counts': - - success_counts = {} - total_counts = {} - finitecounts = True - hamming_distance_counts = None - - with open(filename, 'r') as f: - for line in f: - if (len(line) > 0 and line[0] != '#'): - - line = line.strip('\n') - line = line.split(' ') - l = int(line[0]) - - if l not in success_counts: - success_counts[l] = [] - total_counts[l] = [] - for key in auxlabels: - aux[key][l] = [] - - success_counts[l].append(float(line[1])) - total_counts[l].append(float(line[2])) - - if len(aux) > 0: - assert(line[3] == '#'), "Auxillary data must be divided from the core data!" - for i, key in enumerate(auxlabels): - if key != 'target' and key != 'circuit': - aux[key][l].append(_ast.literal_eval(line[4 + i])) - else: - if key == 'target': - aux[key][l].append(line[4 + i]) - if key == 'circuit': - aux[key][l].append(_cir.Circuit(line[4 + i])) - - elif datatype == 'success_probabilities': - - success_counts = {} - total_counts = None - finitecounts = False - hamming_distance_counts = None - - with open(filename, 'r') as f: - for line in f: - if (len(line) > 0 and line[0] != '#'): - - line = line.strip('\n') - line = line.split(' ') - l = int(line[0]) - - if l not in success_counts: - success_counts[l] = [] - for key in auxlabels: - aux[key][l] = [] - - success_counts[l].append(float(line[1])) - - if len(aux) > 0: - assert(line[2] == '#'), "Auxillary data must be divided from the core data!" - for i, key in enumerate(auxlabels): - if key != 'target' and key != 'circuit': - aux[key][l].append(_ast.literal_eval(line[3 + i])) - else: - if key == 'target': - aux[key][l].append(line[3 + i]) - if key == 'circuit': - aux[key][l].append(_cir.Circuit(line[3 + i])) - - elif datatype == 'hamming_distance_counts' or datatype == 'hamming_distance_probabilities': - - hamming_distance_counts = {} - success_counts = None - total_counts = None - - if datatype == 'hamming_distance_counts': finitecounts = True - if datatype == 'hamming_distance_probabilities': finitecounts = False - - with open(filename, 'r') as f: - for line in f: - if (len(line) > 0 and line[0] != '#'): - - line = line.strip('\n') - line = line.split(' ') - l = int(line[0]) - - if l not in hamming_distance_counts: - hamming_distance_counts[l] = [] - for key in auxlabels: - aux[key][l] = [] - - hamming_distance_counts[l].append([float(line[1 + i]) for i in range(0, numqubits + 1)]) - - if len(aux) > 0: - assert(line[numqubits + 2] == '#'), "Auxillary data must be divided from the core data!" 
- for i, key in enumerate(auxlabels): - if key != 'target' and key != 'circuit': - aux[key][l].append(_ast.literal_eval(line[numqubits + 3 + i])) - else: - if key == 'target': - aux[key][l].append(line[numqubits + 3 + i]) - if key == 'circuit': - aux[key][l].append(line[numqubits + 3 + i]) - #aux[key][l].append(_cir.Circuit(line[numqubits + 3 + i])) - else: - raise ValueError("The data format couldn't be extracted from the file!") - - rbdataset = _dataset.RBSummaryDataset(numqubits, success_counts=success_counts, total_counts=total_counts, - hamming_distance_counts=hamming_distance_counts, aux=aux, - finitecounts=finitecounts, descriptor=descriptor) - - if verbosity > 0: - print('complete') - - return rbdataset - - -def write_rb_summary_data_to_file(ds, filename): - """ - todo - - """ - numqubits = ds.num_qubits - with open(filename, 'w') as f: - - descriptor_string = ds.descriptor.split("\n") - - for s in descriptor_string: - if len(s) > 0: - f.write("# " + s + "\n") - - if ds.datatype == 'success_counts': - if ds.finitecounts: - topline = '## rblength success_counts total_counts' - else: - topline = '## rblength success_probabilities' - - elif ds.datatype == 'hamming_distance_counts': - if ds.finitecounts: - topline = '## rblength' + ''.join([' hd{}c'.format(i) for i in range(0, numqubits + 1)]) - else: - topline = '## rblength' + ''.join([' hd{}p'.format(i) for i in range(0, numqubits + 1)]) - - auxlabels = list(ds.aux.keys()) - if len(auxlabels) > 0: - topline += ' #' - for key in auxlabels: topline += ' ' + key - - f.write(topline + '\n') - - for l, counts in ds.counts.items(): - - for i, c in enumerate(counts): - - if ds.datatype == 'success_counts': - if ds.finitecounts: - dataline = str(l) + ' ' + str(c) + ' ' + str(ds._total_counts[l][i]) - else: - dataline = str(l) + ' ' + str(c) - elif ds.datatype == 'hamming_distance_counts': - dataline = str(l) + ''.join([' ' + str(c[i]) for i in range(0, numqubits + 1)]) - - if len(auxlabels) > 0: - dataline += ' #' + ''.join([' ' + str(ds.aux[key][l][i]) for key in auxlabels]) - - f.write(dataline + '\n') - - return - - -# # todo update this. -# def import_rb_summary_data(filenames, numqubits, type='auto', verbosity=1): -# """ -# todo : redo -# Reads in one or more text files of summary RB data into a RBSummaryDataset object. This format -# is appropriate for using the RB analysis functions. The datafile(s) should have one of the -# following two formats: - -# Format 1 (`is_counts_data` is True): - -# # The number of qubits -# The number of qubits (this line is optional if `num_qubits` is specified) -# # RB length // Success counts // Total counts // Circuit depth // Circuit two-qubit gate count -# Between 3 and 5 columns of data (the last two columns are expected only if `contains_circuit_data` is True). - -# Format 2 (`is_counts_data` is False): - -# # The number of qubits -# The number of qubits (this line is optional if `num_qubits` is specified) -# # RB length // Survival probabilities // Circuit depth // Circuit two-qubit gate count -# Between 2 and 4 columns of data (the last two columns are expected only if `contains_circuit_data` is True). - -# Parameters -# ---------- -# filenames : str or list. -# The filename, or a list of filenams, where the data is stored. The data from all files is read -# into a *single* dataset, so normally it should all be data for a single RB experiment. - -# is_counts_data : bool, optional -# Whether the data to be read contains success counts data (True) or survival probability data (False). 
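As an editorial illustration of the plain-text format that `import_rb_summary_data` parses and `write_rb_summary_data_to_file` emits (the numbers are made up and `twoQgatecount` is a hypothetical auxiliary label):

    # Direct RB simulation on two qubits          <- descriptor line(s), written as "# ..."
    ## rblength success_counts total_counts # twoQgatecount
    0 98 100 # 0
    0 97 100 # 2
    4 91 100 # 5
    16 73 100 # 18

Each data row gives the RB length, the per-circuit counts, and then any auxiliary values after a `#` separator; the other supported headers (`## rblength success_probabilities` and the `hd{i}c` / `hd{i}p` Hamming-distance columns) follow the same row layout.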
- -# contains_circuit_data : bool, optional. -# Whether the data counts summary circuit data. - -# finitesampling : bool, optional -# Records in the RBSummaryDataset whether the survival probability for each circuit was obtained -# from finite sampling of the outcome probabilities. This is there to, by default, warn the user -# that any finite sampling cannot be taken into account if the input is not counts data (when -# they run any analysis on the data). But it is useful to be able to set this to False for simulated -# data obtained from perfect outcome sampling. - -# num_qubits : int, optional. -# The number of qubits the data is for. Must be specified if this isn't in the input file. - -# total_counts : int, optional -# If the data is success probability data, the total counts can optional be input here. - -# verbosity : int, optional -# The amount of print-to-screen. - -# Returns -# ------- -# None -# """ - - -# # todo : update this. -# def write_rb_summary_data_to_file(RBSdataset, filename): -# """ -# Writes an RBSSummaryDataset to file, in the format that can be read back in by -# import_rb_summary_data(). - -# Parameters -# ---------- -# RBSdataset : RBSummaryDataset -# The data to write to file. - -# filename : str -# The filename where the dataset should be written. - -# Returns -# ------- -# None -# """ diff --git a/pygsti/extras/rb/simulate.py b/pygsti/extras/rb/simulate.py deleted file mode 100644 index 2b4dc8708..000000000 --- a/pygsti/extras/rb/simulate.py +++ /dev/null @@ -1,1020 +0,0 @@ -""" Clifford circuits with Pauli errors simulation functions """ -#*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). -# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights -# in this software. -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except -# in compliance with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. -#*************************************************************************************************** - -import os as _os -import time as _time - -import numpy as _np - -from pygsti.baseobjs.label import Label as _Lbl -from pygsti.data.dataset import DataSet as _DataSet -from pygsti.tools import symplectic as _symp - -#from . import sample as _samp -_samp = None # MOVED - and this module is deprecated & broken now, so just set to None - - -def random_paulierror_in_chp(q): - """ - todo. - """ - i = _np.random.randint(0, 3) - - if i == 0: - return ('p ' + str(q) + '\n') * 2 - - elif i == 1: - return 'h ' + str(q) + '\n' + ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' - - else: - return ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' + ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' - - -def random_pauli_in_chp(q): - """ - todo. - """ - i = _np.random.randint(0, 4) - - if i == 0: - return '' - - elif i == 1: - return ('p ' + str(q) + '\n') * 2 - - elif i == 2: - return 'h ' + str(q) + '\n' + ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' - - else: - return ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' + ('p ' + str(q) + '\n') * 2 + 'h ' + str(q) + '\n' - - -def stdgate_to_chp(gate, chpqubits): - """ - todo - Converts any of the standard Clifford gates to a chp string. 
- """ - gatestr = str(gate).split(':') - name = gatestr[0] - qubits = [chpqubits[q] for q in gatestr[1:]] - - if name == 'Gi': - return '' - - elif name == 'Gxpi': - s = 'h ' + str(qubits[0]) + '\n' - s += ('p ' + str(qubits[0]) + '\n') * 2 - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gypi': - s = ('p ' + str(qubits[0]) + '\n') * 2 - s += 'h ' + str(qubits[0]) + '\n' - s += ('p ' + str(qubits[0]) + '\n') * 2 - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gzpi': - return ('p ' + str(qubits[0]) + '\n') * 2 - - elif name == 'Gxpi2': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gxmpi2': - s = 'h ' + str(qubits[0]) + '\n' - s += ('p ' + str(qubits[0]) + '\n') * 3 - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gzpi2' or name == 'Gp': - return 'p ' + str(qubits[0]) + '\n' - - elif name == 'Gzmpi2' or name == 'Gpdag': - return ('p ' + str(qubits[0]) + '\n') * 3 - - elif name == 'Gh': - return ('h ' + str(qubits[0]) + '\n') - - elif name == 'Gc0': - return '' - - elif name == 'Gc1': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc2': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc3': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc4': - s = 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc5': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc6': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc7': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc8': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc9': - s = 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc10': - s = 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc11': - s = 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc12': - s = 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc13': - s = 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc14': - s = 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc15': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc16': - s = 'h ' + str(qubits[0]) + 
'\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc17': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc18': - s = 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc19': - s = 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc20': - s = 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc21': - s = 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc22': - s = 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'h ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gc23': - s = 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - s += 'p ' + str(qubits[0]) + '\n' - return s - - elif name == 'Gcnot': - return 'c ' + str(qubits[0]) + ' ' + str(qubits[1]) + '\n' - - elif name == 'Gcphase': - s = 'h ' + str(qubits[1]) + '\n' - s += 'c ' + str(qubits[0]) + ' ' + str(qubits[1]) + '\n' - s = 'h ' + str(qubits[1]) + '\n' - return s - - else: - raise ValueError("{} is an unknown gate! You must make your own `gateinchp` function!".format(gate)) - - -class IndDepolErrorModel(object): - """ - todo - - """ - - def __init__(self, gate_errors, readout_errors): - """ - todo - - """ - self.gate_errors = gate_errors - self.readout_errors = readout_errors - - def layer_uniform_pauli_probability(self, layer, qubitorder): - """ - todo. - - """ - if len(layer) > 0: - return 1 - _np.prod([1 - _np.array([4 * self.gate_errors[gate].get(q, 0) / 3 - for q in qubitorder]) for gate in layer], axis=0) - else: - return _np.zeros(len(qubitorder), float) - - def readout_uniform_pauli_probability(self, qubitorder): - """ - todo - - """ - return 1 - _np.prod([1 - _np.array([4 * self.readout_errors[q1].get(q2, 0) / 3 - for q2 in qubitorder]) for q1 in qubitorder], axis=0) - - -def depolarizing_errors_circuit_simulator(circuitlist, shots, errormodel, gate_to_chp=None, - aux_info_list=None, collision_action='keepseparate', - outdir='', perge_chp_files=True, returnds=True, - verbosity=1): - """ - todo. - - """ - if returnds: - ds = _DataSet(collision_action=collision_action) - else: - ds = [] - assert(_os.path.isfile("chp")), "This simulator uses the chp.c code.\n" + \ - "It must be compiled to an executable called `chp` and situated in this folder!" 
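For orientation (an editorial sketch, not output captured from this code): a `circuit-<cind>-instance-<sample>.chp` program written by `depolarizing_errors_circuit_simulator` for a two-qubit circuit could look like the following, where `h`, `p`, `c` and `m` are CHP's Hadamard, phase, CNOT and measure instructions.

    #
    h 0
    p 0
    h 0
    p 1
    p 1
    c 0 1
    m 0
    m 1

Here the first three lines after `#` are `Gxpi2` on qubit 0 translated by `stdgate_to_chp`, the two `p 1` lines are a sampled Pauli-Z error on qubit 1 from `random_pauli_in_chp`, the `c 0 1` line is a CNOT layer, and the trailing `m` lines are the final Z-measurement on every circuit line label.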
- - try: - _os.mkdir(outdir) - if perge_chp_files: - perge_dir = True - else: - perge_dir = False - except: - perge_dir = False - pass - - time0 = _time.time() - - if gate_to_chp is None: gate_to_chp = stdgate_to_chp - - percentdone = 0 - for cind, circuit in enumerate(circuitlist): - print(cind) - time1 = _time.time() - - if verbosity > 0: - if verbosity > 1: - print("{0:.2f}".format(cind / len(circuitlist)), end=' ') - else: - if int(_np.floor(cind / len(circuitlist))) > percentdone: - percentdone += 1 - print(" - Simulation {} percent complete.".format(percentdone)) - - n = circuit.num_lines - depth = circuit.depth - - # Set up the CHP qubit labels: could be different CHP labels for each circuit. - aschpq = {label: str(i) for i, label in enumerate(circuit.line_labels)} - - # A list containing the CHP string for each error-free circuit layer. - perfect_chpstrings = [''.join([gate_to_chp(gate, aschpq) for gate in circuit.layer(i)]) - for i in range(depth)] - - # Find the probability of error in each circuit layer. - errorprobs = [errormodel.layer_uniform_pauli_probability(circuit.layer(lind), circuit.line_labels) - for lind in range(circuit.depth)] - # Add measurement error at the end - errorprobs.append(errormodel.readout_uniform_pauli_probability(circuit.line_labels)) - - time2 = _time.time() - - for sample in range(shots): - - # Sample errors for the circuit. Note that if 1 then a uniformly random Pauli is sampled, so - # there is a 1/4 chance of no error even if this is 1. This is correct. - #print(errorprobs) - haserror = [_np.random.binomial(1, ep) for ep in errorprobs] - #for lind in range(depth): - # print('-', lind) - # print(haserror[lind]) - # print([random_pauli_in_chp(q) for q in range(n) if haserror[lind][q] == 1]) - # Construct the CHP string for each error layer. - error_chpstrings = [''.join([''] + [random_pauli_in_chp(q) for q in range(n) if haserror[lind][q] == 1]) - for lind in range(depth)] - - # Interleave the perfect and error CHP strings and then join. - chpstring = '#\n' + ''.join([val for pair in zip(perfect_chpstrings, error_chpstrings) for val in pair]) - - # Add the readout error - chpstring += ''.join([''] + [random_pauli_in_chp(q) for q in range(n) if haserror[depth][q] == 1]) - - # Add a measurement on all the qubits. - chpstring += '\n'.join(['m ' + aschpq[q] for q in circuit.line_labels]) + '\n' - #print(chpstring) - with open(outdir + "/circuit-{}-instance-{}.chp".format(cind, sample), 'w') as f: - f.write(chpstring) - - # Run CHP on this file. - _os.system("./chp " + outdir + "/circuit-{}-instance-{}.chp > ".format(cind, sample) - + outdir + "/circuit-{}-instance-{}-out.txt".format(cind, sample)) - - countdict = {} - for sample in range(shots): - - with open(outdir + "/circuit-{}-instance-{}-out.txt".format(cind, sample), 'r') as f: - #print(cind,sample) - outasdict = {} - for i, line in enumerate(f): - if i > 3: - line = line.strip(' \n') - line = line.split(' ') - # todo : this assumes definite outcome circuits, so fix that - # by instead counting forward from the start of the line. 
- outasdict[circuit.line_labels[int(line[-2][:-1])]] = line[-1] - - #print(outasdict) - bitstring = ''.join([outasdict[q] for q in circuit.line_labels]) - - if perge_chp_files: - _os.system("rm " + outdir + "/circuit-{}-instance-{}.chp".format(cind, sample)) - _os.system("rm " + outdir + "/circuit-{}-instance-{}-out.txt".format(cind, sample)) - - try: - countdict[bitstring] += 1 - except: - countdict[bitstring] = 1 - - if aux_info_list is not None: - aux = aux_info_list[cind] - else: - aux = None - - if returnds: - ds.add_count_dict(circuit, countdict, record_zero_counts=False, aux=aux) - else: - ds.append(countdict) - - time3 = _time.time() - - if verbosity > 1: - print("({0:.2f}, {1:.2f})".format(time2 - time1, time3 - time2), end=', ') - print("Total time: {0:.2f})".format(time3 - time0)) - - if perge_dir: - _os.system("rmdir " + outdir) - - return ds - - -def circuit_simulator_for_tensored_independent_pauli_errors(circuit, pspec, errormodel, counts, - alloutcomes=False, idle1q_placeholder='I'): - """ - A Clifford circuit simulator for an error model whereby each gate in the circuit induces independent Pauli - errors on some or all of the qubits, with user-specified error probabilities that can vary between gate - and between Pauli. State preparation and measurements errors are restricted to bit-flip errors on the output. - - This simulator is a stochastic-unravelling simulator that uses an efficient-in-qubit-number representation - of the action of Clifford gates on Paulis. Specifically, it samples Pauli errors according to the error - statistics provided, and propogates them through the layers of Clifford gates in the circuit using the - conjugation action of the Cliffords on Paulis (as represented by 2n X 2n symplectic matrices for n qubits). - This is repeated for the number of counts (`counts`) requested. So, this function takes a time to run that - scales as (counts * n^2 * circuit depth). Therefore, this method will be slower than the pyGSTi density-matrix - simulators at low qubit number and high `counts`. - - Parameters - ---------- - circuit : Circuit - The circuit to simulate. It should only contain gates that are also contained within the provided - QubitProcessorSpec `pspec` and are Clifford gates. - - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. The Clifford model in QubitProcessorSpec should contain all of - the gates that are in the circuit. - - errormodel : dict - A dictionary defining the error model. This errormodel should have keys that are Label objects (the - elements of the circuit). The values for a particular Label is an (n,4) numpy array of floats, that - encodes the errors caused by the gate specified by that Label. The (i,j) value in the array is the - probability that this gate is followed by Pauli i where Pauli 0 = identity, Pauli 1 = X, Pauli 2 = Y - and Pauli 3 = Z. So, if the arrray is [1.,0.,0.,0.] in every row then there is no errors, if it is - [1-p,p/3,p/3,p/3] in row j then there is equal probability of each Pauli error on qubit j with an - error probability of p. - - Some simple error models can be auto-constructed using `create_locally_gate_independent_pauli_error_model()` - or create_iid_pauli_error_model()`. - - counts : The number of counts, i.e., the number of repeats of the circuit that data should be generated for. 
- - alloutcomes : bool, optional - If True then a dictionary is returned where the keys are all possible outcomes (i.e., all length n - bit strings) and the values are the counts for all of the outcomes. If False, then the returned - dictionary only contains keys for those outcomes that happen at least once. - - TODO: docstring: idle1q_placeholder - - Returns - ------- - dict - A dictionary of simulated measurement outcome counts. - """ - n = circuit.number_of_lines - - if set(circuit.line_labels) != set(pspec.qubit_labels): - assert(set(circuit.line_labels).issubset(set(pspec.qubit_labels))) - reduced_errormodel = errormodel.copy() - mask = _np.zeros(pspec.num_qubits, bool) - for i in range(pspec.num_qubits): - if pspec.qubit_labels[i] in circuit.line_labels: - mask[i] = True - for key in list(reduced_errormodel.keys()): - errormatrix = reduced_errormodel[key] - assert(_np.shape(errormatrix)[0] == pspec.num_qubits), "Format of `errormodel` incorrect!" - if len(_np.shape(errormatrix)) == 2: - reduced_errormodel[key] = errormatrix[mask, :] - elif len(_np.shape(errormatrix)) == 1: - reduced_errormodel[key] = errormatrix[mask] - else: raise ValueError("Format of `errormodel` incorrect!") - else: - reduced_errormodel = errormodel - - results = {} - - if alloutcomes: - for i in range(2**n): - result = tuple(_symp.int_to_bitstring(i, n)) - results[result] = 0 - - for i in range(0, counts): - result = oneshot_circuit_simulator_for_tensored_independent_pauli_errors( - circuit, pspec, reduced_errormodel, idle1q_placeholder) - try: results[result] += 1 - except: results[result] = 1 - - return results - - -def oneshot_circuit_simulator_for_tensored_independent_pauli_errors(circuit, pspec, errormodel, idle1q_placeholder='I'): - """ - Generates a single measurement result for the `circuit_simulator_for_tensored_independent_pauli_errors()` - simulator - - Parameters - ---------- - circuit : Circuit - The circuit to simulate. It should only contain gates that are also contained within the provided - QubitProcessorSpec `pspec` and are Clifford gates. - - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. The Clifford model in QubitProcessorSpec should contain all of - the gates that are in the circuit. - - errormodel : dict - A dictionary defining the error model. This errormodel should have keys that are Label objects (the - elements of the circuit). The values for a particular Label is an (n,4) numpy array of floats, that - encodes the errors caused by the gate specified by that Label. The (i,j) value in the array is the - probability that this gate is followed by Pauli i where Pauli 0 = identity, Pauli 1 = X, Pauli 2 = Y - and Pauli 3 = Z. So, if the arrray is [1.,0.,0.,0.] in every row then there is no errors, if it is - [1-p,p/3,p/3,p/3] in row j then there is equal probability of each Pauli error on qubit j with an - error probability of p. - - TODO: docstring: idle1q_placeholder - - Returns - ------- - tuple - A tuple of values that are 0 or 1, corresponding to the results of a z-measurement on all the qubits. - The ordering of this tuple corresponds to the ordering of the wires in the circuit. 
- """ - n = circuit.number_of_lines - depth = circuit.depth - sout, pout = _symp.prep_stabilizer_state(n, zvals=None) - srep = pspec.models['clifford'].compute_clifford_symplectic_reps() - I = _np.identity(2 * n, int) - - for l in range(depth): - - layer = circuit.layer_with_idles(l, idle_gate_name=idle1q_placeholder) - s, p = _symp.symplectic_rep_of_clifford_layer(layer, n, q_labels=circuit.line_labels, srep_dict=srep) - # Apply the perfect layer to the current state. - sout, pout = _symp.apply_clifford_to_stabilizer_state(s, p, sout, pout) - - # Consider each gate in the layer, and apply Pauli errors with the relevant probs. - for gate in layer: - # Sample a pauli vector for the gate - gerror_p = _np.zeros(2 * n, int) - sampledvec = _np.array([list(_np.random.multinomial(1, pp)) for pp in errormodel[gate]]) - # Z and Y both map X - > -X under conjugation, which is encoded with the upper half of - # the p vector being set to 2. - gerror_p[:n] = 2 * (sampledvec[:, 3] ^ sampledvec[:, 2]) - # X and Y both map Z - > -Z under conjugation, which is encoded with the lower half of - # the p vector being set to 2. - gerror_p[n:] = 2 * (sampledvec[:, 1] ^ sampledvec[:, 2]) - - sout, pout = _symp.apply_clifford_to_stabilizer_state(I, gerror_p, sout, pout) - - output = [] - for q in range(n): - measurement_out = _symp.pauli_z_measurement(sout, pout, q) - # The probability of the '1' outcome - oneprob = measurement_out[1] - # Sample a bit with that probability to be 1. - bit = _np.random.binomial(1, oneprob) - output.append(bit) - - # Add measurement errors, by bit-flipping with some probability - try: - measurement_errors = errormodel['measure'] - except: - measurement_errors = [0 for i in range(n)] - - add_to_outcome = _np.array([_np.random.binomial(1, p) for p in measurement_errors]) - output = tuple(_np.array(output) ^ add_to_outcome) - outputasstring = '' - for s in output: outputasstring += str(s) - return outputasstring - - -def rb_with_pauli_errors(pspec, errormodel, lengths, k, counts, qubit_subset=None, filename=None, rbtype='DRB', - rbspec=None, returndata=True, appenddata=False, verbosity=0, idle1q_placeholder='I'): - """ - Simulates RB with Pauli errors. Can be used to simulated Clifford RB, direct RB and mirror RB. This - function: - - 1) Samples RB circuits - 2) Simulates the RB circuit with the specified Pauli-errors error model - 3) Records the summary RB data to file and/or returns this RB data. - - Step 1 is implemented using the in-built RB samplers. For more information see rb.sample. Step 2 - is implemented using the `circuit_simulator_for_tensored_independent_pauli_errors()` stochastic - errors circuit simulator. See that function for more details. - - Parameters - ---------- - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. - - errormodel : dict - A dictionary defining the error model. This errormodel should have keys that are Label objects - corresponding to the gates in `pspec`. The values for a particular Label is an (n,4) numpy array of - floats, that encodes the errors caused by the gate specified by that Label. The (i,j) value in the - array is the probability that this gate is followed by Pauli i where Pauli 0 = identity, Pauli 1 = X, - Pauli 2 = Y and Pauli 3 = Z. So, if the arrray is [1.,0.,0.,0.] in every row then there is no errors, - if it is [1-p,p/3,p/3,p/3] in row j then there is equal probability of each Pauli error on qubit j with an - error probability of p. 
- - Some simple error models can be auto-constructed using `create_locally_gate_independent_pauli_error_model()` - or create_iid_pauli_error_model()`. - - lengths : list - A list of the RB lengths to sample and simulate circuits at. E.g., for Clifford RB this is the number - of Cliffords in the uncompiled circuit - 2 (see `rb.sample.clifford_rb_circuit()`). - - k : int - The number of circuits to sample and simulate at each RB length. - - counts : int - The number of counts for each circuit. - - qubit_subset : list - If not None, a list of qubit labels that the RB experiment should be over, that is a subset of the - qubits in `pspec`. - - filename : str, optional - A filename for where to save the data (if None, the data is not saved to file). - - rbtype : {'DRB', 'CRB', 'MRB'} - The RB type to simulate. 'DRB' corresponds to direct RB, 'CRB' corresponds to Clifford RB, - and 'MRB' corresponds to mirror RB. - - rbspec : list, optional - Handed to the RB sampling function for all arguments after `pspec` and the RB lengths, which are the first - two arguments handed to the relevant function. See the relevant RB circuit sampling functions for details. - - returndata : bool, optional - Whether to return the data - - appenddata : bool, optional - If writing to file (i.e., `filename` is not None), whether to append the data to an already existing file - or to write over any existing file. - - verbosity : int, optional - The amount of print-to-screen. - - Returns - ------- - None or RBSummaryDataset - If `returndata` an RBSummaryDataset containing the results. Else, None - - """ - if rbspec is None: - rbspec = [] - assert(rbtype == 'CRB' or rbtype == 'DRB' or rbtype == 'MRB'), "RB type not valid!" - - if filename is not None: - if not appenddata: - with open(filename, 'w') as f: - f.write('# Results from a {} simulation\n'.format(rbtype)) - f.write('# Number of qubits\n') - if qubit_subset is None: f.write(str(pspec.num_qubits)) - else: f.write(str(len(qubit_subset))) - f.write('\n# RB length // Success counts // Total counts ' - '// Circuit depth // Circuit two-qubit gate count\n') - - n = pspec.num_qubits - lengthslist = [] - scounts = [] - cdepths = [] - c2Qgcounts = [] - - for i in range(k): - - if verbosity > 0: - print("- Sampling and simulating circuit {} of {} at each of {} lengths".format(i + 1, k, len(lengths))) - print(" - Number of circuits complete = ", end='') - - for lind, l in enumerate(lengths): - - lengthslist.append(l) - - if rbtype == 'DRB': - c, idealout = _samp.direct_rb_circuit(pspec, l, qubit_subset, *rbspec) - elif rbtype == 'CRB': - c, idealout = _samp.clifford_rb_circuit(pspec, l, qubit_subset, *rbspec) - elif rbtype == 'MRB': - c, idealout = _samp.mirror_rb_circuit(pspec, l, qubit_subset, *rbspec) - - #if verbosity > 0: - # print(" complete") - # print(" - Simulating circuit...",end='') - - outcome = circuit_simulator_for_tensored_independent_pauli_errors( - c, pspec, errormodel, counts, alloutcomes=False, idle1q_placeholder=idle1q_placeholder) - #EGN: Hardcoded 'I' here. Could make this into an arg, but there's really - # no need for the user to modify this unless they use 'I' as a gate label. - if verbosity > 0: print(lind + 1, end=',') - - # Add the number of success counts to the list -# scounts.append(outcome.get(idealout,0)) - scounts.append(outcome.get(''.join(str(idealbit) for idealbit in idealout), 0)) - cdepths.append(c.depth) - c2Qgcounts.append(c.two_q_gate_count()) - - # Write the data to file in each round. 
- if filename is not None: - with open(filename, 'a') as f: - f.write('{} {} {} {} {}\n'.format(l, scounts[-1], counts, cdepths[-1], c2Qgcounts[-1])) - - if verbosity > 0: print('') - if returndata: - - from . import results as _res - data = _res.RBSummaryDataset(n, lengthslist, success_counts=scounts, total_counts=counts, - circuit_depths=cdepths, circuit_twoQgate_counts=c2Qgcounts) - return data - - -def create_iid_pauli_error_model(pspec, one_qubit_gate_errorrate, two_qubit_gate_errorrate, idle_errorrate, - measurement_errorrate=0., ptype='uniform', idle1q_placeholder='I'): - """ - Returns a dictionary encoding a Pauli-stochastic error model whereby the errors are the same on all the - 1-qubit gates, and the same on all 2-qubit gates. The probability of the 3 different Pauli errors on each - qubit is specified by `ptype` and can either be uniform, or always X, Y, or Z errors. - - The dictionary returned is in the appropriate format for the - `circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function. - - Parameters - ---------- - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. - - one_qubit_gate_errorrate : float - The 1-qubit gates error rate (the probability of a Pauli error on the target qubit) not including - idle gates. - - two_qubit_gate_errorrate : float - The 2-qubit gates error rate (the total probability of a Pauli error on either qubit the gate acts - on -- each qubit has independent errors with equal probabilities). - - idle_errorrate : float - The idle gates error rate. - - measurement_errorrate : flip - The measurement error rate for all of the qubits. This is the probability that a qubits measurement - result is bit-flipped. - - ptype : str, optional - Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or - 'Z' then the errors are always Pauli X, Y or Z errors, respectively. - - TODO: docstring idle1q_placeholder - - Returns - ------- - dict - An dict that encodes the error model described above in the format required for the simulator - `circuit_simulator_for_tensored_independent_pauli_errors()`. - - """ - if ptype == 'uniform': - def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3]) - - elif ptype == 'X': - def error_row(er): return _np.array([1 - er, er, 0., 0.]) - - elif ptype == 'Y': - def error_row(er): return _np.array([1 - er, 0., er, 0.]) - - elif ptype == 'Z': - def error_row(er): return _np.array([1 - er, 0., 0., er]) - else: - raise ValueError("Error model type not understood! Set `ptype` to a valid option.") - - perQ_twoQ_errorrate = 1 - (1 - two_qubit_gate_errorrate)**(1 / 2) - n = pspec.num_qubits - - errormodel = {} - - if idle1q_placeholder is not None: - #Added by EGN: special behavior needed when Model has - # an gate name used to designate a perfect 1-qubit idle op (used as placeholder). - # This translates to a set of ":X" operation labels all w/idle_errorrate - idleLbl = idle1q_placeholder - for q in pspec.qubit_labels: - gl = _Lbl(idleLbl, q) - errormodel[gl] = _np.zeros((n, 4), float) - errormodel[gl][:, 0] = _np.ones(n, float) - errormodel[gl][pspec.qubit_labels.index(q), :] = error_row(idle_errorrate) - - for gate in pspec.models['clifford'].primitive_op_labels: - errormodel[gate] = _np.zeros((n, 4), float) - errormodel[gate][:, 0] = _np.ones(n, float) - - # If not a CNOT, it is a 1-qubit gate / idle. 
- if gate.num_qubits == 2: - q1 = gate.qubits[0] - q2 = gate.qubits[1] - er = perQ_twoQ_errorrate - errormodel[gate][pspec.qubit_labels.index(q1), :] = error_row(er) - errormodel[gate][pspec.qubit_labels.index(q2), :] = error_row(er) - - elif gate.num_qubits == 1: - q = gate.qubits[0] - er = one_qubit_gate_errorrate - - errormodel[gate][pspec.qubit_labels.index(q), :] = error_row(er) - - else: - raise ValueError("The QubitProcessorSpec must only contain 1- and 2- qubit gates!") - - errormodel['measure'] = _np.array([measurement_errorrate for q in range(n)]) - - return errormodel - - -def create_locally_gate_independent_pauli_error_model(pspec, gate_errorrate_dict, measurement_errorrate_dict=None, - ptype='uniform', idle1q_placeholder='I'): - """ - Returns a dictionary encoding a Pauli-stochastic error model whereby the errors are independent of the gates, - with a qubit subject to an error after a circuit layer with the probabilities specified by the dict - `gate_errorrate_dict`. The probability of the 3 different Pauli errors on each qubit is specified by - `ptype` and can either be uniform, or always X, Y, or Z errors. - - The dictionary returned is in the appropriate format for the - `circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function. - - Parameters - ---------- - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. - - gate_errorrate_dict : dict - A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1]. - The element for qubit with label `q` is the error probability for that qubit. - - measurement_errorrate_dict : dict - A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1]. - The element for qubit with label `q` is the measurement bit-flip error probability for that qubit. - All qubits that do not have a measurement error rate specified are assumed to have perfect measurements. - - ptype : str, optional - Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or - 'Z' then the errors are always Pauli X, Y or Z errors, respectively. - - TODO: docstring: idle1q_placeholder - - Returns - ------- - dict - An dict that encodes the error model described above in the format required for the simulator - `circuit_simulator_for_tensored_independent_pauli_errors()`. - - """ - if measurement_errorrate_dict is None: - measurement_errorrate_dict = {} - if ptype == 'uniform': - def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3]) - - elif ptype == 'X': - def error_row(er): return _np.array([1 - er, er, 0., 0.]) - - elif ptype == 'Y': - def error_row(er): return _np.array([1 - er, 0., er, 0.]) - - elif ptype == 'Z': - def error_row(er): return _np.array([1 - er, 0., 0., er]) - else: - raise ValueError("Error model type not understood! Set `ptype` to a valid option.") - - n = pspec.num_qubits - - errormodel = {} - - if idle1q_placeholder is not None: - #Added by EGN: special behavior needed when Model has - # an gate name used to designate a perfect 1-qubit idle op (used as placeholder). 
- # This translates to a set of ":X" operation labels all w/appropriate errorrate - idleLbl = idle1q_placeholder - for q in pspec.qubit_labels: - gl = _Lbl(idleLbl, q) - er = gate_errorrate_dict[q] - errormodel[gl] = _np.zeros((n, 4), float) - errormodel[gl][:, 0] = _np.ones(n, float) - errormodel[gl][pspec.qubit_labels.index(q), :] = error_row(er) - - for gate in pspec.models['clifford'].primitive_op_labels: - errormodel[gate] = _np.zeros((n, 4), float) - errormodel[gate][:, 0] = _np.ones(n, float) - - for q in gate.qubits: - er = gate_errorrate_dict[q] - errormodel[gate][pspec.qubit_labels.index(q)] = error_row(er) - - errormodel['measure'] = _np.array([measurement_errorrate_dict.get(q, 0.) for q in pspec.qubit_labels]) - - return errormodel - -# -# TODO : DOES THIS NEED AND IDLE PLACEHOLDER? -# - - -def create_local_pauli_error_model(pspec, one_qubit_gate_errorrate_dict, two_qubit_gate_errorrate_dict, - measurement_errorrate_dict=None, ptype='uniform'): - """ - Returns a dictionary encoding a Pauli-stochastic error model whereby the errors caused by a gate act - only on the "target" qubits of the gate, all the 1-qubit gates on a qubit have the same error rate, - and all the 2-qubit gates on a qubit have the same error rate. The probability of the 3 different Pauli - errors on each qubit is specified by `ptype` and can either be uniform, or always X, Y, or Z errors. - - The dictionary returned is in the appropriate format for the - `circuit_simulator_for_tensored_independent_pauli_errors()` circuit simulator function. - - Parameters - ---------- - pspec : QubitProcessorSpec - The QubitProcessorSpec that defines the device. - - one_qubit_gate_errorrate_dict : dict - A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1]. - The element for qubit with label `q` is the error probability for all 1-qubit gates on that qubit - - two_qubit_gate_errorrate_dict : dict - A dict where the keys are 2-qubit gates in pspec and the values are floats in [0,1]. This is the - error probability for the 2-qubit gate, split evenly into independent Pauli errors on each of the - qubits the gate is intended to act on. - - measurement_errorrate_dict : dict - A dict where the keys are elements of pspec.qubit_labels and the values are floats in [0,1]. - The element for qubit with label `q` is the measurement bit-flip error probability for that qubit. - All qubits that do not have a measurement error rate specified are assumed to have perfect measurements. - - - ptype : str, optional - Can be 'uniform', 'X', 'Y' or 'Z'. If 'uniform' then 3 Pauli errors are equally likely, if 'X', 'Y' or - 'Z' then the errors are always Pauli X, Y or Z errors, respectively. - - Returns - ------- - dict - An dict that encodes the error model described above in the format required for the simulator - `circuit_simulator_for_tensored_independent_pauli_errors()`. - - """ - if measurement_errorrate_dict is None: - measurement_errorrate_dict = {} - if ptype == 'uniform': - def error_row(er): return _np.array([1 - er, er / 3, er / 3, er / 3]) - - elif ptype == 'X': - def error_row(er): return _np.array([1 - er, er, 0., 0.]) - - elif ptype == 'Y': - def error_row(er): return _np.array([1 - er, 0., er, 0.]) - - elif ptype == 'Z': - def error_row(er): return _np.array([1 - er, 0., 0., er]) - else: - raise ValueError("Error model type not understood! 
Set `ptype` to a valid option.") - - n = pspec.num_qubits - - errormodel = {} - for gate in list(pspec.models['clifford'].primitive_op_labels): - errormodel[gate] = _np.zeros((n, 4), float) - errormodel[gate][:, 0] = _np.ones(n, float) - - if gate.num_qubits == 1: - er = one_qubit_gate_errorrate_dict[gate.qubits[0]] - elif gate.num_qubits == 2: - er = 1 - (1 - two_qubit_gate_errorrate_dict[gate])**(0.5) - else: raise ValueError("Only 1- and 2-qubit gates supported!") - - for q in gate.qubits: - errormodel[gate][pspec.qubit_labels.index(q)] = error_row(er) - - errormodel['measure'] = _np.array([measurement_errorrate_dict.get(q, 0.) for q in pspec.qubit_labels]) - - return errormodel diff --git a/pygsti/extras/rpe/rpeconstruction.py b/pygsti/extras/rpe/rpeconstruction.py index 6b48af2f8..6559592e2 100644 --- a/pygsti/extras/rpe/rpeconstruction.py +++ b/pygsti/extras/rpe/rpeconstruction.py @@ -113,8 +113,6 @@ def create_parameterized_rpe_model(alpha_true, epsilon_true, aux_rot, spam_depol return outputModel -#def make_rpe_alpha_str_lists(k_list,angleStr,rpeconfig_inst): - def create_rpe_angle_circuit_lists(k_list, angle_name, rpeconfig_inst): """ diff --git a/pygsti/forwardsims/forwardsim.py b/pygsti/forwardsims/forwardsim.py index 2ae19f2f3..3d85d92e1 100644 --- a/pygsti/forwardsims/forwardsim.py +++ b/pygsti/forwardsims/forwardsim.py @@ -147,57 +147,6 @@ def _set_evotype(self, evotype): `evotype` will be `None` when the current model is None""" pass - #def to_vector(self): - # """ - # Returns the parameter vector of the associated Model. - # - # Returns - # ------- - # numpy array - # The vectorized model parameters. - # """ - # return self.paramvec - # - #def from_vector(self, v, close=False, nodirty=False): - # """ - # The inverse of to_vector. - # - # Initializes the Model-like members of this - # calculator based on `v`. Used for computing finite-difference derivatives. - # - # Parameters - # ---------- - # v : numpy.ndarray - # The parameter vector. - # - # close : bool, optional - # Set to `True` if `v` is close to the current parameter vector. - # This can make some operations more efficient. - # - # nodirty : bool, optional - # If True, the framework for marking and detecting when operations - # have changed and a Model's parameter-vector needs to be updated - # is disabled. Disabling this will increases the speed of the call. - # - # Returns - # ------- - # None - # """ - # #Note: this *will* initialize the parent Model's objects too, - # # since only references to preps, effects, and gates are held - # # by the calculator class. ORDER is important, as elements of - # # POVMs and Instruments rely on a fixed from_vector ordering - # # of their simplified effects/gates. - # self.paramvec = v.copy() # now self.paramvec is *not* the same as the Model's paramvec - # self.sos.from_vector(v, close, nodirty) # so don't always want ", nodirty=True)" - we - # # need to set dirty flags so *parent* will re-init it's paramvec... 
- # - # #Re-init reps for computation - # #self.operationreps = { i:self.operations[lbl].torep() for lbl,i in self.operation_lookup.items() } - # #self.operationreps = { lbl:g.torep() for lbl,g in gates.items() } - # #self.prepreps = { lbl:p.torep('prep') for lbl,p in preps.items() } - # #self.effectreps = { lbl:e.torep('effect') for lbl,e in effects.items() } - def _compute_circuit_outcome_probabilities(self, array_to_fill, circuit, outcomes, resource_alloc, time=None): raise NotImplementedError("Derived classes should implement this!") diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 2a42f8891..f1db5ddb1 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -314,13 +314,6 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types layout._param_dimensions, (loc_nparams1, loc_nparams2), (blk1, blk2), max_atom_cachesize, self.model.dim) - #def approx_mem_estimate(nc, np1, np2): - # approx_cachesize = (num_circuits / nc) * 1.3 # inflate expected # of circuits per atom => cache_size - # return _bytes_for_array_types(array_types, num_elements, num_elements / nc, - # num_circuits, num_circuits / nc, - # (num_params, num_params), (num_params / np1, num_params / np2), - # approx_cachesize, self.model.dim) - GB = 1.0 / 1024.0**3 if mem_estimate > mem_limit: raise MemoryError("Not enough memory for desired layout! (limit=%.1fGB, required=%.1fGB)" % ( diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 6199376cf..bad797e36 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -1147,13 +1147,6 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types (blk1, blk2), max_atom_cachesize, self.model.evotype.minimal_dim(self.model.state_space)) - #def approx_mem_estimate(natoms, np1, np2): - # approx_cachesize = (num_circuits / natoms) * 1.3 # inflate expected # circuits per atom => cache_size - # return _bytes_for_array_types(array_types, num_elements, num_elements / natoms, - # num_circuits, num_circuits / natoms, - # (num_params, num_params), (num_params / np1, num_params / np2), - # approx_cachesize, self.model.state_space.dim) - GB = 1.0 / 1024.0**3 if mem_estimate > mem_limit: raise MemoryError("Not enough memory for desired layout! (limit=%.1fGB, required=%.1fGB)" % ( diff --git a/pygsti/forwardsims/termforwardsim.py b/pygsti/forwardsims/termforwardsim.py index d6b00b4fc..06a890992 100644 --- a/pygsti/forwardsims/termforwardsim.py +++ b/pygsti/forwardsims/termforwardsim.py @@ -243,16 +243,6 @@ def __getstate__(self): # and this is done by the parent model which will cause _set_evotype to be called. return state - #OLD - now we have a _set_evotype method. - #@_ForwardSimulator.model.setter - #def model(self, val): - # _ForwardSimulator.model.fset(self, val) # set the base class property (self.model) - # - # #Do some additional initialization - # if self.model.evotype not in ("svterm", "cterm"): - # #raise ValueError(f"Evolution type {self.model.evotype} is incompatible with term-based calculations") - # _warnings.warn("Evolution type %s is incompatible with term-based calculations" % self.model.evotype) - def copy(self): """ Return a shallow copy of this TermForwardSimulator. 
@@ -416,105 +406,6 @@ def _bulk_fill_hprobs_atom(self, array_to_fill, dest_param_slice1, dest_param_sl hpolys[0], hpolys[1], self.model.to_vector(), (nEls, len(wrtInds1), len(wrtInds2))) _fas(array_to_fill, [slice(0, array_to_fill.shape[0]), dest_param_slice1, dest_param_slice2], hprobs) - #DIRECT FNS - keep these around, but they need to be updated (as do routines in fastreplib.pyx) - #def _prs_directly(self, layout_atom, resource_alloc): #comm=None, mem_limit=None, reset_wts=True, repcache=None): - # """ - # Compute probabilities of `layout`'s circuits using "direct" mode. - # - # Parameters - # ---------- - # layout : CircuitOutcomeProbabilityArrayLayout - # The layout. - # - # comm : mpi4py.MPI.Comm, optional - # When not None, an MPI communicator for distributing the computation - # across multiple processors. Distribution is performed over - # subtrees of eval_tree (if it is split). - # - # mem_limit : int, optional - # A rough memory limit in bytes. - # - # reset_wts : bool, optional - # Whether term magnitudes should be updated based on current term coefficients - # (which are based on the current point in model-parameter space) or not. - # - # repcache : dict, optional - # A cache of term representations for increased performance. - # """ - # prs = _np.empty(layout_atom.num_elements, 'd') - # #print("Computing prs directly for %d circuits" % len(circuit_list)) - # if repcache is None: repcache = {} # new repcache... - # k = 0 # *linear* evaluation order so we know final indices are just running - # for i in eval_tree.evaluation_order(): - # circuit = eval_tree[i] - # #print("Computing prs directly: circuit %d of %d" % (i,len(circuit_list))) - # assert(self.evotype == "svterm") # for now, just do SV case - # fastmode = False # start with slow mode - # wtTol = 0.1 - # rholabel = circuit[0] - # opStr = circuit[1:] - # elabels = eval_tree.simplified_circuit_elabels[i] - # prs[k:k + len(elabels)] = replib.SV_prs_directly(self, rholabel, elabels, opStr, - # repcache, comm, mem_limit, fastmode, wtTol, reset_wts, - # self.times_debug) - # k += len(elabels) - # #print("PRS = ",prs) - # return prs - # - #def _dprs_directly(self, eval_tree, wrt_slice, comm=None, mem_limit=None, reset_wts=True, repcache=None): - # """ - # Compute probability derivatives of `eval_tree`'s circuits using "direct" mode. - # - # Parameters - # ---------- - # eval_tree : TermEvalTree - # The evaluation tree. - # - # wrt_slice : slice - # A slice specifying which model parameters to differentiate with respect to. - # - # comm : mpi4py.MPI.Comm, optional - # When not None, an MPI communicator for distributing the computation - # across multiple processors. Distribution is performed over - # subtrees of eval_tree (if it is split). - # - # mem_limit : int, optional - # A rough memory limit in bytes. - # - # reset_wts : bool, optional - # Whether term magnitudes should be updated based on current term coefficients - # (which are based on the current point in model-parameter space) or not. - # - # repcache : dict, optional - # A cache of term representations for increased performance. - # """ - # #Note: Finite difference derivatives are SLOW! 
- # if wrt_slice is None: - # wrt_indices = list(range(self.Np)) - # elif isinstance(wrt_slice, slice): - # wrt_indices = _slct.indices(wrt_slice) - # else: - # wrt_indices = wrt_slice - # - # eps = 1e-6 # HARDCODED - # probs = self._prs_directly(eval_tree, comm, mem_limit, reset_wts, repcache) - # dprobs = _np.empty((eval_tree.num_final_elements(), len(wrt_indices)), 'd') - # orig_vec = self.to_vector().copy() - # iParamToFinal = {i: ii for ii, i in enumerate(wrt_indices)} - # for i in range(self.Np): - # #print("direct dprobs cache %d of %d" % (i,self.Np)) - # if i in iParamToFinal: # LATER: add MPI support? - # iFinal = iParamToFinal[i] - # vec = orig_vec.copy(); vec[i] += eps - # self.from_vector(vec, close=True) - # dprobs[:, iFinal] = (self._prs_directly(eval_tree, - # comm=None, - # mem_limit=None, - # reset_wts=False, - # repcache=repcache) - probs) / eps - # self.from_vector(orig_vec, close=True) - # return dprobs - ## ----- Find a "minimal" path set (i.e. find thresholds for each circuit ----- def _compute_pruned_pathmag_threshold(self, rholabel, elabels, circuit, polynomial_vindices_per_int, repcache, circuitsetup_cache, diff --git a/pygsti/forwardsims/termforwardsim_calc_statevec.pyx b/pygsti/forwardsims/termforwardsim_calc_statevec.pyx index d6a2d5ac0..e784aaa24 100644 --- a/pygsti/forwardsims/termforwardsim_calc_statevec.pyx +++ b/pygsti/forwardsims/termforwardsim_calc_statevec.pyx @@ -1531,654 +1531,3 @@ def circuit_achieved_and_max_sopm(fwdsim, rholabel, elabels, circuit, repcache, max_sopm[i] = max_sum_of_pathmags[i] return achieved_sopm, max_sopm - - - - -# State-vector direct-term calcs ------------------------- - -#cdef vector[vector[TermDirectCRep_ptr]] extract_cterms_direct(python_termrep_lists, INT max_order): -# cdef vector[vector[TermDirectCRep_ptr]] ret = vector[vector[TermDirectCRep_ptr]](max_order+1) -# cdef vector[TermDirectCRep*] vec_of_terms -# for order,termreps in enumerate(python_termrep_lists): # maxorder+1 lists -# vec_of_terms = vector[TermDirectCRep_ptr](len(termreps)) -# for i,termrep in enumerate(termreps): -# vec_of_terms[i] = (termrep).c_term -# ret[order] = vec_of_terms -# return ret - -#def prs_directly(calc, rholabel, elabels, circuit, repcache, comm=None, mem_limit=None, fastmode=True, wt_tol=0.0, reset_term_weights=True, debug=None): -# -# # Create gatelable -> int mapping to be used throughout -# distinct_gateLabels = sorted(set(circuit)) -# glmap = { gl: i for i,gl in enumerate(distinct_gateLabels) } -# t0 = pytime.time() -# -# # Convert circuit to a vector of ints -# cdef INT i, j -# cdef vector[INT] cgatestring -# for gl in circuit: -# cgatestring.push_back(glmap[gl]) -# -# #TODO: maybe compute these weights elsewhere and pass in? 
-# cdef double circuitWeight -# cdef double remaingingWeightTol = wt_tol -# cdef vector[double] remainingWeight = vector[double](len(elabels)) -# if 'circuitWeights' not in repcache: -# repcache['circuitWeights'] = {} -# if reset_term_weights or circuit not in repcache['circuitWeights']: -# circuitWeight = calc.sos.get_prep(rholabel).total_term_weight() -# for gl in circuit: -# circuitWeight *= calc.sos.get_operation(gl).total_term_weight() -# for i,elbl in enumerate(elabels): -# remainingWeight[i] = circuitWeight * calc.sos.get_effect(elbl).total_term_weight() -# repcache['circuitWeights'][circuit] = [ remainingWeight[i] for i in range(remainingWeight.size()) ] -# else: -# for i,wt in enumerate(repcache['circuitWeights'][circuit]): -# assert(wt > 1.0) -# remainingWeight[i] = wt -# -# #if reset_term_weights: -# # print "Remaining weights: " -# # for i in range(remainingWeight.size()): -# # print remainingWeight[i] -# -# cdef double order_base = 0.1 # default for now - TODO: make this a calc param like max_order? -# cdef INT order -# cdef INT numEs = len(elabels) -# -# cdef RepCacheEl repcel; -# cdef vector[TermDirectCRep_ptr] treps; -# cdef DCOMPLEX* coeffs; -# cdef vector[TermDirectCRep*] reps_at_order; -# cdef np.ndarray coeffs_array; -# cdef TermDirectRep rep; -# -# # Construct dict of gate term reps, then *convert* to c-reps, as this -# # keeps alive the non-c-reps which keep the c-reps from being deallocated... -# cdef unordered_map[INT, vector[vector[TermDirectCRep_ptr]] ] op_term_reps = unordered_map[INT, vector[vector[TermDirectCRep_ptr]] ](); # OLD = {} -# for glbl in distinct_gateLabels: -# if glbl in repcache: -# repcel = repcache[glbl] -# op_term_reps[ glmap[glbl] ] = repcel.reps -# for order in range(calc.max_order+1): -# treps = repcel.reps[order] -# coeffs_array = calc.sos.operation(glbl).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: treps[i]._magnitude = abs(coeffs[i]) -# #for order,treps in enumerate(op_term_reps[ glmap[glbl] ]): -# # for coeff,trep in zip(calc.sos.operation(glbl).get_direct_order_coeffs(order,order_base), treps): -# # trep.set_coeff(coeff) -# else: -# repcel = RepCacheEl(calc.max_order) -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) -# for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"gate")) -# repcel.pyterm_references.append(rep) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = reps_at_order -# #OLD -# #reps = [ [t.torep(None,None,"gate") for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# op_term_reps[ glmap[glbl] ] = repcel.reps -# repcache[glbl] = repcel -# -# #OLD -# #op_term_reps = { glmap[glbl]: [ [t.torep(None,None,"gate") for t in calc.sos.operation(glbl).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# # for glbl in distinct_gateLabels } -# -# #Similar with rho_terms and E_terms -# cdef vector[vector[TermDirectCRep_ptr]] rho_term_reps; -# if rholabel in repcache: -# repcel = repcache[rholabel] -# rho_term_reps = repcel.reps -# for order in range(calc.max_order+1): -# treps = rho_term_reps[order] -# coeffs_array = calc.sos.prep(rholabel).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: 
treps[i]._magnitude = abs(coeffs[i]) -# -# #for order,treps in enumerate(rho_term_reps): -# # for coeff,trep in zip(calc.sos.prep(rholabel).get_direct_order_coeffs(order,order_base), treps): -# # trep.set_coeff(coeff) -# else: -# repcel = RepCacheEl(calc.max_order) -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) -# for t in calc.sos.prep(rholabel).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"prep")) -# repcel.pyterm_references.append(rep) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = reps_at_order -# rho_term_reps = repcel.reps -# repcache[rholabel] = repcel -# -# #OLD -# #rho_term_reps = [ [t.torep(None,None,"prep") for t in calc.sos.prep(rholabel).get_direct_order_terms(order,order_base)] -# # for order in range(calc.max_order+1) ] -# #repcache[rholabel] = rho_term_reps -# -# #E_term_reps = [] -# cdef vector[vector[TermDirectCRep_ptr]] E_term_reps = vector[vector[TermDirectCRep_ptr]](0); -# cdef TermDirectCRep_ptr cterm; -# e_indices = [] # TODO: upgrade to C-type? -# if all([ elbl in repcache for elbl in elabels]): -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# cur_indices = [] # the Evec-index corresponding to each term rep -# for j,elbl in enumerate(elabels): -# repcel = repcache[elbl] -# #term_reps = [t.torep(None,None,"effect") for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base) ] -# -# treps = repcel.reps[order] -# coeffs_array = calc.sos.effect(elbl).get_direct_order_coeffs(order,order_base) -# coeffs = (coeffs_array.data) -# for i in range(treps.size()): -# treps[i]._coeff = coeffs[i] -# if reset_term_weights: treps[i]._magnitude = abs(coeffs[i]) -# reps_at_order.push_back(treps[i]) -# cur_indices.extend( [j]*reps_at_order.size() ) -# -# #OLD -# #term_reps = repcache[elbl][order] -# #for coeff,trep in zip(calc.sos.effect(elbl).get_direct_order_coeffs(order,order_base), term_reps): -# # trep.set_coeff(coeff) -# #cur_term_reps.extend( term_reps ) -# # cur_indices.extend( [j]*len(term_reps) ) -# -# E_term_reps.push_back(reps_at_order) -# e_indices.append( cur_indices ) -# # E_term_reps.append( cur_term_reps ) -# -# else: -# for elbl in elabels: -# if elbl not in repcache: repcache[elbl] = RepCacheEl(calc.max_order) #[None]*(calc.max_order+1) # make sure there's room -# for order in range(calc.max_order+1): -# reps_at_order = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# cur_indices = [] # the Evec-index corresponding to each term rep -# for j,elbl in enumerate(elabels): -# repcel = repcache[elbl] -# treps = vector[TermDirectCRep_ptr](0) # the term reps for *all* the effect vectors -# for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base): -# rep = (t.torep(None,None,"effect")) -# repcel.pyterm_references.append(rep) -# treps.push_back( rep.c_term ) -# reps_at_order.push_back( rep.c_term ) -# repcel.reps[order] = treps -# cur_indices.extend( [j]*treps.size() ) -# #term_reps = [t.torep(None,None,"effect") for t in calc.sos.effect(elbl).get_direct_order_terms(order,order_base) ] -# #repcache[elbl][order] = term_reps -# #cur_term_reps.extend( term_reps ) -# #cur_indices.extend( [j]*len(term_reps) ) -# E_term_reps.push_back(reps_at_order) -# e_indices.append( cur_indices ) -# #E_term_reps.append( cur_term_reps ) -# -# #convert to c-reps -# cdef INT gi -# #cdef vector[vector[TermDirectCRep_ptr]] rho_term_creps = rho_term_reps # already c-reps... 
-# #cdef vector[vector[TermDirectCRep_ptr]] E_term_creps = E_term_reps # already c-reps... -# #cdef unordered_map[INT, vector[vector[TermDirectCRep_ptr]]] gate_term_creps = op_term_reps # already c-reps... -# #cdef vector[vector[TermDirectCRep_ptr]] rho_term_creps = extract_cterms_direct(rho_term_reps,calc.max_order) -# #cdef vector[vector[TermDirectCRep_ptr]] E_term_creps = extract_cterms_direct(E_term_reps,calc.max_order) -# #for gi,termrep_lists in op_term_reps.items(): -# # gate_term_creps[gi] = extract_cterms_direct(termrep_lists,calc.max_order) -# -# E_cindices = vector[vector[INT]](len(e_indices)) -# for ii,inds in enumerate(e_indices): -# E_cindices[ii] = vector[INT](len(inds)) -# for jj,indx in enumerate(inds): -# E_cindices[ii][jj] = indx -# -# #Note: term calculator "dim" is the full density matrix dim -# stateDim = int(round(np.sqrt(calc.dim))) -# if debug is not None: -# debug['tstartup'] += pytime.time()-t0 -# t0 = pytime.time() -# -# #Call C-only function (which operates with C-representations only) -# cdef vector[float] debugvec = vector[float](10) -# debugvec[0] = 0.0 -# cdef vector[DCOMPLEX] prs = prs_directly( -# cgatestring, rho_term_reps, op_term_reps, E_term_reps, -# #cgatestring, rho_term_creps, gate_term_creps, E_term_creps, -# E_cindices, numEs, calc.max_order, stateDim, fastmode, &remainingWeight, remaingingWeightTol, debugvec) -# -# debug['total'] += debugvec[0] -# debug['t1'] += debugvec[1] -# debug['t2'] += debugvec[2] -# debug['t3'] += debugvec[3] -# debug['n1'] += debugvec[4] -# debug['n2'] += debugvec[5] -# debug['n3'] += debugvec[6] -# debug['t4'] += debugvec[7] -# debug['n4'] += debugvec[8] -# #if not all([ abs(prs[i].imag) < 1e-4 for i in range(prs.size()) ]): -# # print("ERROR: prs = ",[ prs[i] for i in range(prs.size()) ]) -# #assert(all([ abs(prs[i].imag) < 1e-6 for i in range(prs.size()) ])) -# return [ prs[i].real for i in range(prs.size()) ] # TODO: make this into a numpy array? - maybe pass array to fill to prs_directy above? 
-# -# -#cdef vector[DCOMPLEX] prs_directly( -# vector[INT]& circuit, vector[vector[TermDirectCRep_ptr]] rho_term_reps, -# unordered_map[INT, vector[vector[TermDirectCRep_ptr]]] op_term_reps, -# vector[vector[TermDirectCRep_ptr]] E_term_reps, vector[vector[INT]] E_term_indices, -# INT numEs, INT max_order, INT dim, bool fastmode, vector[double]* remainingWeight, double remTol, vector[float]& debugvec): -# -# #NOTE: circuit and gate_terms use *integers* as operation labels, not Label objects, to speed -# # lookups and avoid weird string conversion stuff with Cython -# -# cdef INT N = len(circuit) -# cdef INT* p = malloc((N+2) * sizeof(INT)) -# cdef INT i,j,k,order,nTerms -# cdef INT gn -# -# cdef INT t0 = time.clock() -# cdef INT t, n, nPaths; #for below -# -# cdef innerloopfn_direct_ptr innerloop_fn; -# if fastmode: -# innerloop_fn = pr_directly_innerloop_savepartials -# else: -# innerloop_fn = pr_directly_innerloop -# -# #extract raw data from gate_terms dictionary-of-lists for faster lookup -# #gate_term_prefactors = np.empty( (nOperations,max_order+1,dim,dim) -# #cdef unordered_map[INT, vector[vector[unordered_map[INT, complex]]]] gate_term_coeffs -# #cdef vector[vector[unordered_map[INT, complex]]] rho_term_coeffs -# #cdef vector[vector[unordered_map[INT, complex]]] E_term_coeffs -# #cdef vector[vector[INT]] e_indices -# -# cdef vector[INT]* Einds -# cdef vector[vector_TermDirectCRep_ptr_ptr] factor_lists -# -# assert(max_order <= 2) # only support this partitioning below (so far) -# -# cdef vector[DCOMPLEX] prs = vector[DCOMPLEX](numEs) -# -# for order in range(max_order+1): -# #print("DB: pr_as_polynomial order=",order) -# -# #for p in partition_into(order, N): -# for i in range(N+2): p[i] = 0 # clear p -# factor_lists = vector[vector_TermDirectCRep_ptr_ptr](N+2) -# -# if order == 0: -# #inner loop(p) -# #factor_lists = [ gate_terms[glbl][pi] for glbl,pi in zip(circuit,p) ] -# t = time.clock() -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if factor_lists[k+1].size() == 0: continue # WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# #print("Part0 ",p) -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,0.0) #remTol) # force 0-order -# debugvec[1] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[4] += nPaths -# -# elif order == 1: -# t = time.clock(); n=0 -# for i in range(N+2): -# p[i] = 1 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[gn][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue #WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# #print "DB: Order1 " -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,0.0) #remTol) # force 1st-order -# p[i] = 0 -# n += nPaths -# debugvec[2] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[5] += n -# -# elif order == 2: -# t = time.clock(); n=0 -# for i in range(N+2): -# p[i] = 2 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue # WHAT??? 
-# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,remTol) -# p[i] = 0 -# n += nPaths -# -# debugvec[3] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[6] += n -# t = time.clock(); n=0 -# -# for i in range(N+2): -# p[i] = 1 -# for j in range(i+1,N+2): -# p[j] = 1 -# #inner loop(p) -# factor_lists[0] = &rho_term_reps[p[0]] -# for k in range(N): -# gn = circuit[k] -# factor_lists[k+1] = &op_term_reps[circuit[k]][p[k+1]] -# #if len(factor_lists[k+1]) == 0: continue #WHAT??? -# factor_lists[N+1] = &E_term_reps[p[N+1]] -# Einds = &E_term_indices[p[N+1]] -# -# nPaths = innerloop_fn(factor_lists,Einds,&prs,dim,remainingWeight,remTol) -# p[j] = 0 -# n += nPaths -# p[i] = 0 -# debugvec[7] += float(time.clock() - t)/time.CLOCKS_PER_SEC -# debugvec[8] += n -# -# else: -# assert(False) # order > 2 not implemented yet... -# -# free(p) -# -# debugvec[0] += float(time.clock() - t0)/time.CLOCKS_PER_SEC -# return prs -# -# -# -#cdef INT pr_directly_innerloop(vector[vector_TermDirectCRep_ptr_ptr] factor_lists, vector[INT]* Einds, -# vector[DCOMPLEX]* prs, INT dim, vector[double]* remainingWeight, double remainingWeightTol): -# #print("DB partition = ","listlens = ",[len(fl) for fl in factor_lists]) -# -# cdef INT i,j,Ei -# cdef double complex scale, val, newval, pLeft, pRight, p -# cdef double wt, cwt -# cdef int nPaths = 0 -# -# cdef TermDirectCRep* factor -# -# cdef INT nFactorLists = factor_lists.size() # may need to recompute this after fast-mode -# cdef INT* factorListLens = malloc(nFactorLists * sizeof(INT)) -# cdef INT last_index = nFactorLists-1 -# -# for i in range(nFactorLists): -# factorListLens[i] = factor_lists[i].size() -# if factorListLens[i] == 0: -# free(factorListLens) -# return 0 # nothing to loop over! - (exit before we allocate more) -# -# cdef double complex coeff # THESE are only real changes from "as_polynomial" -# cdef double complex result # version of this function (where they are PolynomialCRep type) -# -# cdef StateCRep *prop1 = new StateCRep(dim) -# cdef StateCRep *prop2 = new StateCRep(dim) -# cdef StateCRep *tprop -# cdef EffectCRep* EVec -# -# cdef INT* b = malloc(nFactorLists * sizeof(INT)) -# for i in range(nFactorLists): b[i] = 0 -# -# assert(nFactorLists > 0), "Number of factor lists must be > 0!" 
-# -# #for factors in _itertools.product(*factor_lists): -# while(True): -# final_factor_indx = b[last_index] -# Ei = deref(Einds)[final_factor_indx] #final "factor" index == E-vector index -# wt = deref(remainingWeight)[Ei] -# if remainingWeightTol == 0.0 or wt > remainingWeightTol: #if we need this "path" -# # In this loop, b holds "current" indices into factor_lists -# factor = deref(factor_lists[0])[b[0]] # the last factor (an Evec) -# coeff = factor._coeff -# cwt = factor._magnitude -# -# for i in range(1,nFactorLists): -# coeff *= deref(factor_lists[i])[b[i]]._coeff -# cwt *= deref(factor_lists[i])[b[i]]._magnitude -# -# #pLeft / "pre" sim -# factor = deref(factor_lists[0])[b[0]] # 0th-factor = rhoVec -# prop1.copy_from(factor._pre_state) -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# for i in range(1,last_index): -# factor = deref(factor_lists[i])[b[i]] -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# -# # can't propagate effects, so effect's post_ops are constructed to act on *state* -# EVec = factor._post_effect -# for j in range(factor._post_ops.size()): -# rhoVec = factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# pLeft = EVec.amplitude(prop1) -# -# #pRight / "post" sim -# factor = deref(factor_lists[0])[b[0]] # 0th-factor = rhoVec -# prop1.copy_from(factor._post_state) -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# for i in range(1,last_index): -# factor = deref(factor_lists[i])[b[i]] -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# -# EVec = factor._pre_effect -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # final state in prop1 -# pRight = EVec.amplitude(prop1).conjugate() -# -# #Add result to appropriate polynomial -# result = coeff * pLeft * pRight -# deref(prs)[Ei] = deref(prs)[Ei] + result #TODO - see why += doesn't work here -# deref(remainingWeight)[Ei] = wt - cwt # "weight" of this path -# nPaths += 1 # just for debuggins -# -# #increment b ~ itertools.product & update vec_index_noop = np.dot(self.multipliers, b) -# for i in range(nFactorLists-1,-1,-1): -# if b[i]+1 < factorListLens[i]: -# b[i] += 1 -# break -# else: -# b[i] = 0 -# else: -# break # can't increment anything - break while(True) loop -# -# #Clenaup: free allocated memory -# del prop1 -# del prop2 -# free(factorListLens) -# free(b) -# return nPaths -# -# -#cdef INT pr_directly_innerloop_savepartials(vector[vector_TermDirectCRep_ptr_ptr] factor_lists, -# vector[INT]* Einds, vector[DCOMPLEX]* prs, INT dim, -# vector[double]* remainingWeight, double remainingWeightTol): -# #print("DB partition = ","listlens = ",[len(fl) for fl in factor_lists]) -# -# cdef INT i,j,Ei -# cdef double complex scale, val, newval, pLeft, pRight, p -# -# cdef INT incd -# cdef TermDirectCRep* factor -# -# cdef INT nFactorLists = factor_lists.size() # may need to recompute this after fast-mode -# cdef INT* 
factorListLens = malloc(nFactorLists * sizeof(INT)) -# cdef INT last_index = nFactorLists-1 -# -# for i in range(nFactorLists): -# factorListLens[i] = factor_lists[i].size() -# if factorListLens[i] == 0: -# free(factorListLens) -# return 0 # nothing to loop over! (exit before we allocate anything else) -# -# cdef double complex coeff -# cdef double complex result -# -# #fast mode -# cdef vector[StateCRep*] leftSaved = vector[StateCRep_ptr](nFactorLists-1) # saved[i] is state after i-th -# cdef vector[StateCRep*] rightSaved = vector[StateCRep_ptr](nFactorLists-1) # factor has been applied -# cdef vector[DCOMPLEX] coeffSaved = vector[DCOMPLEX](nFactorLists-1) -# cdef StateCRep *shelved = new StateCRep(dim) -# cdef StateCRep *prop2 = new StateCRep(dim) # prop2 is always a temporary allocated state not owned by anything else -# cdef StateCRep *prop1 -# cdef StateCRep *tprop -# cdef EffectCRep* EVec -# -# cdef INT* b = malloc(nFactorLists * sizeof(INT)) -# for i in range(nFactorLists): b[i] = 0 -# assert(nFactorLists > 0), "Number of factor lists must be > 0!" -# -# incd = 0 -# -# #Fill saved arrays with allocated states -# for i in range(nFactorLists-1): -# leftSaved[i] = new StateCRep(dim) -# rightSaved[i] = new StateCRep(dim) -# -# #for factors in _itertools.product(*factor_lists): -# #for incd,fi in incd_product(*[range(len(l)) for l in factor_lists]): -# while(True): -# # In this loop, b holds "current" indices into factor_lists -# #print "DB: iter-product BEGIN" -# -# if incd == 0: # need to re-evaluate rho vector -# #print "DB: re-eval at incd=0" -# factor = deref(factor_lists[0])[b[0]] -# -# #print "DB: re-eval left" -# prop1 = leftSaved[0] # the final destination (prop2 is already alloc'd) -# prop1.copy_from(factor._pre_state) -# for j in range(factor._pre_ops.size()): -# #print "DB: re-eval left item" -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # swap prop1 <-> prop2 -# rhoVecL = prop1 -# leftSaved[0] = prop1 # final state -> saved -# # (prop2 == the other allocated state) -# -# #print "DB: re-eval right" -# prop1 = rightSaved[0] # the final destination (prop2 is already alloc'd) -# prop1.copy_from(factor._post_state) -# for j in range(factor._post_ops.size()): -# #print "DB: re-eval right item" -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop # swap prop1 <-> prop2 -# rhoVecR = prop1 -# rightSaved[0] = prop1 # final state -> saved -# # (prop2 == the other allocated state) -# -# #print "DB: re-eval coeff" -# coeff = factor._coeff -# coeffSaved[0] = coeff -# incd += 1 -# else: -# #print "DB: init from incd" -# rhoVecL = leftSaved[incd-1] -# rhoVecR = rightSaved[incd-1] -# coeff = coeffSaved[incd-1] -# -# # propagate left and right states, saving as we go -# for i in range(incd,last_index): -# #print "DB: propagate left begin" -# factor = deref(factor_lists[i])[b[i]] -# prop1 = leftSaved[i] # destination -# prop1.copy_from(rhoVecL) #starting state -# for j in range(factor._pre_ops.size()): -# #print "DB: propagate left item" -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# rhoVecL = prop1 -# leftSaved[i] = prop1 -# # (prop2 == the other allocated state) -# -# #print "DB: propagate right begin" -# prop1 = rightSaved[i] # destination -# prop1.copy_from(rhoVecR) #starting state -# for j in range(factor._post_ops.size()): -# #print "DB: propagate right item" -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# rhoVecR = prop1 -# 
rightSaved[i] = prop1 -# # (prop2 == the other allocated state) -# -# #print "DB: propagate coeff mult" -# coeff *= factor._coeff -# coeffSaved[i] = coeff -# -# # for the last index, no need to save, and need to construct -# # and apply effect vector -# prop1 = shelved # so now prop1 (and prop2) are alloc'd states -# -# #print "DB: left ampl" -# factor = deref(factor_lists[last_index])[b[last_index]] # the last factor (an Evec) -# EVec = factor._post_effect -# prop1.copy_from(rhoVecL) # initial state (prop2 already alloc'd) -# for j in range(factor._post_ops.size()): -# factor._post_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# pLeft = EVec.amplitude(prop1) # output in prop1, so this is final amplitude -# -# #print "DB: right ampl" -# EVec = factor._pre_effect -# prop1.copy_from(rhoVecR) -# for j in range(factor._pre_ops.size()): -# factor._pre_ops[j].acton(prop1,prop2) -# tprop = prop1; prop1 = prop2; prop2 = tprop -# pRight = EVec.amplitude(prop1).conjugate() -# -# shelved = prop1 # return prop1 to the "shelf" since we'll use prop1 for other things next -# -# #print "DB: final block" -# #print "DB running coeff = ",dict(coeff._coeffs) -# #print "DB factor coeff = ",dict(factor._coeff._coeffs) -# result = coeff * factor._coeff -# #print "DB result = ",dict(result._coeffs) -# result *= pLeft * pRight -# final_factor_indx = b[last_index] -# Ei = deref(Einds)[final_factor_indx] #final "factor" index == E-vector index -# deref(prs)[Ei] += result -# #print "DB prs[",INT(Ei),"] = ",dict(deref(prs)[Ei]._coeffs) -# -# #assert(debug < 100) #DEBUG -# #print "DB: end product loop" -# -# #increment b ~ itertools.product & update vec_index_noop = np.dot(self.multipliers, b) -# for i in range(nFactorLists-1,-1,-1): -# if b[i]+1 < factorListLens[i]: -# b[i] += 1; incd = i -# break -# else: -# b[i] = 0 -# else: -# break # can't increment anything - break while(True) loop -# -# #Cleanup: free allocated memory -# for i in range(nFactorLists-1): -# del leftSaved[i] -# del rightSaved[i] -# del prop2 -# del shelved -# free(factorListLens) -# free(b) -# return 0 #TODO: fix nPaths - diff --git a/pygsti/io/mongodb.py b/pygsti/io/mongodb.py index 1a3004157..3991a417c 100644 --- a/pygsti/io/mongodb.py +++ b/pygsti/io/mongodb.py @@ -158,7 +158,6 @@ def read_auxtree_from_mongodb_doc(mongodb, doc, auxfile_types_member='auxfile_ty def _load_auxdoc_member(mongodb, member_name, typ, metadata, quick_load): - from pymongo import ASCENDING, DESCENDING subtypes = typ.split(':') cur_typ = subtypes[0] next_typ = ':'.join(subtypes[1:]) @@ -809,7 +808,6 @@ def remove_auxtree_from_mongodb(mongodb, collection_name, doc_id, auxfile_types_ def _remove_auxdoc_member(mongodb, member_name, typ, metadata, session, recursive): - from pymongo import ASCENDING, DESCENDING subtypes = typ.split(':') cur_typ = subtypes[0] next_typ = ':'.join(subtypes[1:]) diff --git a/pygsti/layouts/copalayout.py b/pygsti/layouts/copalayout.py index bd5020aa8..34e907d8f 100644 --- a/pygsti/layouts/copalayout.py +++ b/pygsti/layouts/copalayout.py @@ -600,10 +600,6 @@ def fill_jtj(self, j, jtj): """ jtj[:] = _np.dot(j.T, j) - #Not needed - #def allocate_jtj_shared_mem_buf(self): - # return _np.empty((self._param_dimensions[0], self._param_dimensions[0]), 'd'), None - def memory_estimate(self, array_type, dtype='d'): """ Memory required to allocate an array of a given type (in bytes). 
diff --git a/pygsti/layouts/distlayout.py b/pygsti/layouts/distlayout.py index 9db1150d8..16ae93957 100644 --- a/pygsti/layouts/distlayout.py +++ b/pygsti/layouts/distlayout.py @@ -807,7 +807,6 @@ def __init__(self, circuits, unique_circuits, to_unique, unique_complete_circuit super().__init__(local_circuits, local_unique_circuits, local_to_unique, local_elindex_outcome_tuples, local_unique_complete_circuits, param_dimensions, resource_alloc) - @property def max_atom_elements(self): """ The most elements owned by a single atom. """ diff --git a/pygsti/layouts/maplayout.py b/pygsti/layouts/maplayout.py index 39a58fb08..da57444ae 100644 --- a/pygsti/layouts/maplayout.py +++ b/pygsti/layouts/maplayout.py @@ -325,4 +325,4 @@ def _create_atom(group): unique_to_orig = {unique_i: orig_i for orig_i, unique_i in self._to_unique.items()} # unique => orig. indices for atom in self.atoms: for expanded_circuit_i, unique_i in atom.unique_indices_by_expcircuit.items(): - atom.orig_indices_by_expcircuit[expanded_circuit_i] = unique_to_orig[unique_i] \ No newline at end of file + atom.orig_indices_by_expcircuit[expanded_circuit_i] = unique_to_orig[unique_i] diff --git a/pygsti/layouts/matrixlayout.py b/pygsti/layouts/matrixlayout.py index bfff25a31..a37f4bc21 100644 --- a/pygsti/layouts/matrixlayout.py +++ b/pygsti/layouts/matrixlayout.py @@ -323,7 +323,6 @@ def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_p unique_complete_circuits.append(comp_ckt) else: unique_complete_circuits.append(model.complete_circuit(c)) - #Note: "unique" means a unique circuit *before* circuit-completion, so there could be duplicate # "unique circuits" after completion, e.g. "rho0Gx" and "Gx" could both complete to "rho0GxMdefault_0". diff --git a/pygsti/modelmembers/modelmember.py b/pygsti/modelmembers/modelmember.py index 27e36e692..43dcdc94e 100644 --- a/pygsti/modelmembers/modelmember.py +++ b/pygsti/modelmembers/modelmember.py @@ -340,23 +340,6 @@ def unlink_parent(self, force=False): if (self.parent is not None) and (force or self.parent._obj_refcount(self) == 0): self._parent = None - # UNUSED - as this doesn't mark parameter for reallocation like it used to - #def clear_gpindices(self): - # """ - # Sets gpindices to None, along with any submembers' gpindices. - # - # This essentially marks these members for parameter re-allocation - # (e.g. if the number - not just the value - of parameters they have - # changes). - # - # Returns - # ------- - # None - # """ - # for subm in self.submembers(): - # subm.clear_gpindices() - # self._gpindices = None - def set_gpindices(self, gpindices, parent, memo=None): """ Set the parent and indices into the parent's parameter vector that are used by this ModelMember object. 
diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 9990c1669..4e1c50101 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -491,10 +491,6 @@ def _compute_taylor_order_terms(self, order, max_polynomial_vars, gpindices_arra self.terms[order] = terms - #def _decompose_indices(x): - # return tuple(_modelmember._decompose_gpindices( - # self.gpindices, _np.array(x, _np.int64))) - mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars, _np.int64)) for ii, i in enumerate(gpindices_array): mapvec[i] = ii @@ -555,25 +551,6 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) if mag >= min_term_mag: terms.append(_term.compose_terms_with_mag(factors, mag)) return terms - #def _decompose_indices(x): - # return tuple(_modelmember._decompose_gpindices( - # self.gpindices, _np.array(x, _np.int64))) - # - #mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars,_np.int64)) - #for ii,i in enumerate(self.gpindices_as_array()): - # mapvec[i] = ii - # - ##poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices - #poly_coeffs = [t.coeff.mapvec_indices(mapvec) for t in terms] # with *local* indices - #tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs] - #if len(tapes) > 0: - # vtape = _np.concatenate([t[0] for t in tapes]) - # ctape = _np.concatenate([t[1] for t in tapes]) - #else: - # vtape = _np.empty(0, _np.int64) - # ctape = _np.empty(0, complex) - #coeffs_as_compact_polys = (vtape, ctape) - #self.local_term_poly_coeffs[order] = coeffs_as_compact_polys @property def total_term_magnitude(self): diff --git a/pygsti/modelmembers/operations/lindbladcoefficients.py b/pygsti/modelmembers/operations/lindbladcoefficients.py index 6400812f0..b5a430ad6 100644 --- a/pygsti/modelmembers/operations/lindbladcoefficients.py +++ b/pygsti/modelmembers/operations/lindbladcoefficients.py @@ -854,7 +854,6 @@ def from_vector(self, v): else: raise ValueError("Internal error: invalid block type!") - #def paramvals_to_coefficients_deriv(self, parameter_values, cache_mx=None): def deriv_wrt_params(self, v=None): """ Construct derivative of Lindblad coefficients (for this block) from a set of parameter values. diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 4e264e392..59390dac5 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -698,7 +698,6 @@ def to_sparse(self, on_space='minimal'): else: # dense rep return _sps.csr_matrix(self.to_dense(on_space)) - def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False): """ Get the `order`-th order Taylor-expansion terms of this operation. @@ -747,11 +746,6 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= self._rep.Lterms, self._rep.Lterm_coeffs = self._init_terms(Lblocks, max_polynomial_vars) return self._rep.Lterms # terms with local-index polynomial coefficients - #def get_direct_order_terms(self, order): # , order_base=None - unused currently b/c order is always 0... 
- # v = self.to_vector() - # poly_terms = self.get_taylor_order_terms(order) - # return [ term.evaluate_coeff(v) for term in poly_terms ] - @property def total_term_magnitude(self): """ @@ -1249,7 +1243,6 @@ def transform_inplace(self, s): else: raise ValueError("Invalid transform for this LindbladErrorgen: type %s" % str(type(s))) - def deriv_wrt_params(self, wrt_filter=None): """ diff --git a/pygsti/modelmembers/operations/linearop.py b/pygsti/modelmembers/operations/linearop.py index 6eb9bbdd6..c86352a9f 100644 --- a/pygsti/modelmembers/operations/linearop.py +++ b/pygsti/modelmembers/operations/linearop.py @@ -128,28 +128,6 @@ def set_time(self, t): """ pass - #def rep_at_time(self, t): - # """ - # Retrieves a representation of this operator at time `t`. - # - # This is operationally equivalent to calling `self.set_time(t)` and - # then retrieving `self._rep`. However, what is returned from this function - # need not be the same rep object for different times, allowing the - # operator object to cache many reps for different times to increase performance - # (this avoids having to initialize the same rep at a given time). - # - # Parameters - # ---------- - # t : float - # The time. - # - # Returns - # ------- - # object - # """ - # self.set_time(t) - # return self._rep - def to_dense(self, on_space='minimal'): """ Return this operation as a dense matrix. diff --git a/pygsti/modelmembers/operations/repeatedop.py b/pygsti/modelmembers/operations/repeatedop.py index 76ca647ed..4f297a33e 100644 --- a/pygsti/modelmembers/operations/repeatedop.py +++ b/pygsti/modelmembers/operations/repeatedop.py @@ -113,29 +113,6 @@ def to_dense(self, on_space='minimal'): op = self.repeated_op.to_dense(on_space) return _np.linalg.matrix_power(op, self.num_repetitions) - #def torep(self): - # """ - # Return a "representation" object for this operation. - # - # Such objects are primarily used internally by pyGSTi to compute - # things like probabilities more efficiently. - # - # Returns - # ------- - # OpRep - # """ - # if self._evotype == "densitymx": - # return replib.DMOpRepExponentiated(self.repeated_op.torep(), self.power, self.dim) - # elif self._evotype == "statevec": - # return replib.SVOpRepExponentiated(self.repeated_op.torep(), self.power, self.dim) - # elif self._evotype == "stabilizer": - # nQubits = int(round(_np.log2(self.dim))) # "stabilizer" is a unitary-evolution type mode - # return replib.SVOpRepExponentiated(self.repeated_op.torep(), self.power, nQubits) - # assert(False), "Invalid internal _evotype: %s" % self._evotype - - #FUTURE: term-related functions (maybe base off of ComposedOp or use a composedop to generate them?) - # e.g. ComposedOp([self.repeated_op] * power, dim, evotype) - @property def parameter_labels(self): """ diff --git a/pygsti/modelmembers/povms/basepovm.py b/pygsti/modelmembers/povms/basepovm.py index 0ec92bfa3..f1dd92d12 100644 --- a/pygsti/modelmembers/povms/basepovm.py +++ b/pygsti/modelmembers/povms/basepovm.py @@ -172,40 +172,6 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): for lbl, subm_serial_id in zip(mm_dict['effect_labels'], mm_dict['submembers'])} return cls(effects, mm_dict['evotype'], state_space) # Note: __init__ call signature of derived classes - #def _reset_member_gpindices(self): - # """ - # Sets gpindices for all non-complement items. Assumes all non-complement - # vectors have *independent* parameters (for now). 
- # """ - # Np = 0 - # for k, effect in self.items(): - # if k == self.complement_label: continue - # N = effect.num_params - # pslc = slice(Np, Np + N) - # if effect.gpindices != pslc: - # effect.set_gpindices(pslc, self) - # Np += N - # self.Np = Np - # - #def _rebuild_complement(self, identity_for_complement=None): - # """ Rebuild complement vector (in case other vectors have changed) """ - # - # if self.complement_label is not None and self.complement_label in self: - # non_comp_effects = [v for k, v in self.items() - # if k != self.complement_label] - # - # if identity_for_complement is None: - # identity_for_complement = self[self.complement_label].identity - # - # complement_effect = _ComplementPOVMEffect( - # identity_for_complement, non_comp_effects) - # complement_effect.set_gpindices(slice(0, self.Np), self) # all parameters - # - # #Assign new complement effect without calling our __setitem__ - # old_ro = self._readonly; self._readonly = False - # _POVM.__setitem__(self, self.complement_label, complement_effect) - # self._readonly = old_ro - def __setitem__(self, key, value): if not self._readonly: # when readonly == False, we're initializing return super(_BasePOVM, self).__setitem__(key, value) diff --git a/pygsti/modelmembers/povms/marginalizedpovm.py b/pygsti/modelmembers/povms/marginalizedpovm.py index a56595287..9d8dd3029 100644 --- a/pygsti/modelmembers/povms/marginalizedpovm.py +++ b/pygsti/modelmembers/povms/marginalizedpovm.py @@ -207,85 +207,6 @@ def __reduce__(self): self.sslbls_after_marginalizing), {'_gpindices': self._gpindices}) # preserve gpindices (but not parent) - #May need to implement this in future if we allow non-static MarginalizedPOVMs - #def allocate_gpindices(self, starting_index, parent, memo=None): - # """ - # Sets gpindices array for this object or any objects it - # contains (i.e. depends upon). Indices may be obtained - # from contained objects which have already been initialized - # (e.g. if a contained object is shared with other - # top-level objects), or given new indices starting with - # `starting_index`. - # - # Parameters - # ---------- - # starting_index : int - # The starting index for un-allocated parameters. - # - # parent : Model or ModelMember - # The parent whose parameter array gpindices references. - # - # memo : set, optional - # Used to prevent duplicate calls and self-referencing loops. If - # `memo` contains an object's id (`id(self)`) then this routine - # will exit immediately. - # - # Returns - # ------- - # num_new: int - # The number of *new* allocated parameters (so - # the parent should mark as allocated parameter - # indices `starting_index` to `starting_index + new_new`). - # """ - # if memo is None: memo = set() - # if id(self) in memo: return 0 - # memo.add(id(self)) - # - # assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm - # num_new_params = self.error_map.allocate_gpindices(starting_index, parent, memo) # *same* parent as self - # _mm.ModelMember.set_gpindices( - # self, self.error_map.gpindices, parent) - # return num_new_params - - #def relink_parent(self, parent): # Unnecessary? - # """ - # Sets the parent of this object *without* altering its gpindices. - # - # In addition to setting the parent of this object, this method - # sets the parent of any objects this object contains (i.e. - # depends upon) - much like allocate_gpindices. To ensure a valid - # parent is not overwritten, the existing parent *must be None* - # prior to this call. 
- # """ - # self.povm_to_marginalize.relink_parent(parent) - # _mm.ModelMember.relink_parent(self, parent) - - #def set_gpindices(self, gpindices, parent, memo=None): - # """ - # Set the parent and indices into the parent's parameter vector that - # are used by this ModelMember object. - # - # Parameters - # ---------- - # gpindices : slice or integer ndarray - # The indices of this objects parameters in its parent's array. - # - # parent : Model or ModelMember - # The parent whose parameter array gpindices references. - # - # Returns - # ------- - # None - # """ - # if memo is None: memo = set() - # elif id(self) in memo: return - # memo.add(id(self)) - # - # assert(self.base_povm.num_params == 0) # so no need to do anything w/base_povm - # self.error_map.set_gpindices(gpindices, parent, memo) - # self.terms = {} # clear terms cache since param indices have changed now - # _mm.ModelMember._set_only_my_gpindices(self, gpindices, parent) - def simplify_effects(self, prefix=""): """ Creates a dictionary of simplified effect vectors. diff --git a/pygsti/modelmembers/states/fullpurestate.py b/pygsti/modelmembers/states/fullpurestate.py index 771ebd81f..a3da3cc5e 100644 --- a/pygsti/modelmembers/states/fullpurestate.py +++ b/pygsti/modelmembers/states/fullpurestate.py @@ -44,30 +44,6 @@ def __init__(self, purevec, basis="pp", evotype="default", state_space=None): self._paramlbls = _np.array(["VecElement Re(%d)" % i for i in range(self.state_space.udim)] + ["VecElement Im(%d)" % i for i in range(self.state_space.udim)], dtype=object) - #REMOVE (Cannot set to arbitrary vector) - but maybe could set to pure vector? - #def set_dense(self, vec): - # """ - # Set the dense-vector value of this SPAM vector. - # - # Attempts to modify this SPAM vector's parameters so that the raw - # SPAM vector becomes `vec`. Will raise ValueError if this operation - # is not possible. - # - # Parameters - # ---------- - # vec : array_like or State - # A numpy array representing a SPAM vector, or a State object. - # - # Returns - # ------- - # None - # """ - # vec = State._to_vector(vec) - # if(vec.size != self.dim): - # raise ValueError("Argument must be length %d" % self.dim) - # self._ptr[:] = vec - # self.dirty = True - @property def num_params(self): """ diff --git a/pygsti/modelmembers/term.py b/pygsti/modelmembers/term.py index 3023236e9..85d1eed3e 100644 --- a/pygsti/modelmembers/term.py +++ b/pygsti/modelmembers/term.py @@ -718,9 +718,6 @@ def coeff(self): """ return _Polynomial.from_rep(self._rep.coeff) - #def _coeff_copy(self): - # return self.coeff.copy() - def map_indices_inplace(self, mapfn): """ Performs a bulk find & replace on the coefficient polynomial's variable indices. diff --git a/pygsti/modelpacks/stdtarget.py b/pygsti/modelpacks/stdtarget.py index 20f3587f8..9da24f5ae 100644 --- a/pygsti/modelpacks/stdtarget.py +++ b/pygsti/modelpacks/stdtarget.py @@ -42,127 +42,6 @@ def _get_cachefile_names(std_module, param_type, simulator, py_version): raise ValueError("No cache files used for param-type=%s" % param_type) -# XXX is this used? 
-# def _make_hs_cache_for_std_model(std_module, term_order, max_length, json_too=False, comm=None): -# """ -# A utility routine to for creating the term-based cache files for a standard module -# """ -# target_model = std_module.target_model() -# prep_fiducials = std_module.prepStrs -# effect_fiducials = std_module.effectStrs -# germs = std_module.germs -# -# x = 1 -# maxLengths = [] -# while(x <= max_length): -# maxLengths.append(x) -# x *= 2 -# -# listOfExperiments = _stdlists.create_lsgst_circuits( -# target_model, prep_fiducials, effect_fiducials, germs, maxLengths) -# -# mdl_terms = target_model.copy() -# mdl_terms.set_all_parameterizations("H+S terms") # CPTP terms? -# my_calc_cache = {} -# mdl_terms.sim = _TermFSim(mode="taylor", max_order=term_order, cache=my_calc_cache) -# -# comm_method = "scheduler" -# if comm is not None and comm.Get_size() > 1 and comm_method == "scheduler": -# from mpi4py import MPI # just needed for MPI.SOURCE below -# -# #Alternate: use rank0 as "scheduler" -# rank = 0 if (comm is None) else comm.Get_rank() -# nprocs = 1 if (comm is None) else comm.Get_size() -# N = len(listOfExperiments); cur_index = 0; active_workers = nprocs - 1 -# buf = _np.zeros(1, _np.int64) # use buffer b/c mpi4py .send/.recv seem buggy -# if rank == 0: -# # ** I am the scheduler ** -# # Give each "worker" rank an initial index to compute -# for i in range(1, nprocs): -# if cur_index == N: # there are more procs than items - just send -1 index to mean "you're done!" -# buf[0] = -1 -# comm.Send(buf, dest=i, tag=1) # tag == 1 => scheduler to worker -# active_workers -= 1 -# else: -# buf[0] = cur_index -# comm.Send(buf, dest=i, tag=1); cur_index += 1 -# -# # while there are active workers keep dishing out indices -# while active_workers > 0: -# comm.Recv(buf, source=MPI.ANY_SOURCE, tag=2) # worker requesting assignment -# worker_rank = buf[0] -# if cur_index == N: # nothing more to do: just send -1 index to mean "you're done!" 
-# buf[0] = -1 -# comm.Send(buf, dest=worker_rank, tag=1) # tag == 1 => scheduler to worker -# active_workers -= 1 -# else: -# buf[0] = cur_index -# comm.Send(buf, dest=worker_rank, tag=1) -# cur_index += 1 -# -# else: -# # ** I am a worker ** -# comm.Recv(buf, source=0, tag=1) -# index_to_compute = buf[0] -# -# while index_to_compute >= 0: -# print("Worker %d computing prob %d of %d" % (rank, index_to_compute, N)) -# t0 = _time.time() -# mdl_terms.probabilities(listOfExperiments[index_to_compute]) -# print("Worker %d finished computing prob %d in %.2fs" % (rank, index_to_compute, _time.time() - t0)) -# -# buf[0] = rank -# comm.Send(buf, dest=0, tag=2) # tag == 2 => worker requests next assignment -# comm.Recv(buf, source=0, tag=1) -# index_to_compute = buf[0] -# -# print("Rank %d at barrier" % rank) -# comm.barrier() # wait here until all workers and scheduler are done -# -# else: -# -# #divide up strings among ranks -# my_expList, _, _ = _mpit.distribute_indices(listOfExperiments, comm, False) -# rankStr = "" if (comm is None) else "Rank%d: " % comm.Get_rank() -# -# if comm is not None and comm.Get_rank() == 0: -# print("%d circuits divided among %d processors" % (len(listOfExperiments), comm.Get_size())) -# -# t0 = _time.time() -# for i, opstr in enumerate(my_expList): -# print("%s%.2fs: Computing prob %d of %d" % (rankStr, _time.time() - t0, i, len(my_expList))) -# mdl_terms.probabilities(opstr) -# #mdl_terms.bulk_probs(my_expList) # also fills cache, but allocs more mem at once -# -# py_version = 3 if (_sys.version_info > (3, 0)) else 2 -# key_fn, val_fn = _get_cachefile_names(std_module, "H+S terms", -# "termorder:%d" % term_order, py_version) -# _write_calccache(my_calc_cache, key_fn, val_fn, json_too, comm) -# -# if comm is None or comm.Get_rank() == 0: -# print("Completed in %.2fs" % (_time.time() - t0)) -# print("Num of Experiments = ", len(listOfExperiments)) -# -# #if comm is None: -# # calcc_list = [ my_calc_cache ] -# #else: -# # calcc_list = comm.gather(my_calc_cache, root=0) -# # -# #if comm is None or comm.Get_rank() == 0: -# # calc_cache = {} -# # for c in calcc_list: -# # calc_cache.update(c) -# # -# # print("Completed in %.2fs" % (_time.time()-t0)) -# # print("Cachesize = ",len(calc_cache)) -# # print("Num of Experiments = ", len(listOfExperiments)) -# # -# # py_version = 3 if (_sys.version_info > (3, 0)) else 2 -# # key_fn, val_fn = _get_cachefile_names(std_module, "H+S terms", -# # "termorder:%d" % term_order,py_version) -# # _write_calccache(calc_cache, key_fn, val_fn, json_too, comm) - - # XXX apparently only used from _make_hs_cache_for_std_model which itself looks unused def _write_calccache(calc_cache, key_fn, val_fn, json_too=False, comm=None): """ diff --git a/pygsti/models/explicitmodel.py b/pygsti/models/explicitmodel.py index 2faa9c955..bae28bbd8 100644 --- a/pygsti/models/explicitmodel.py +++ b/pygsti/models/explicitmodel.py @@ -186,16 +186,6 @@ def _excalc(self): return _explicitcalc.ExplicitOpModelCalc(self.state_space.dim, simplified_preps, simplified_ops, simplified_effects, self.num_params, self._param_interposer) - #Unneeded - just use string processing & rely on effect labels *not* having underscores in them - #def simplify_spamtuple_to_outcome_label(self, simplified_spamTuple): - # #TODO: make this more efficient (prep lbl isn't even used!) 
-    #    for prep_lbl in self.preps:
-    #        for povm_lbl in self.povms:
-    #            for elbl in self.povms[povm_lbl]:
-    #                if simplified_spamTuple == (prep_lbl, povm_lbl + "_" + elbl):
-    #                    return (elbl,)  # outcome "label" (a tuple)
-    #    raise ValueError("No outcome label found for simplified spam_tuple: ", simplified_spamTuple)
-
     def _embed_operation(self, op_target_labels, op_val, force=False):
         """
         Called by OrderedMemberDict._auto_embed to create an embedded-gate
diff --git a/pygsti/models/memberdict.py b/pygsti/models/memberdict.py
index 833c389b0..59f923d88 100644
--- a/pygsti/models/memberdict.py
+++ b/pygsti/models/memberdict.py
@@ -45,18 +45,6 @@ def __setitem__(self, key, val):
                              "beginning with the prefix '%s'" % self._prefix)
         super(_PrefixOrderedDict, self).__setitem__(key, val)
 
-    #Handled by derived classes
-    #def __reduce__(self):
-    #    items = [(k,v) for k,v in self.iteritems()]
-    #    return (_PrefixOrderedDict, (self._prefix, items), None)
-
-    """
-    An ordered dictionary whose keys must begin with a given prefix,
-    and which holds LinearOperator objects. This class ensures that every value is a
-    :class:`LinearOperator`-derived object by converting any non-`LinearOperator` values into
-    `LinearOperator`s upon assignment and raising an error if this is not possible.
-    """
-
 
 class OrderedMemberDict(_PrefixOrderedDict, _mm.ModelChild):
     """
diff --git a/pygsti/models/model.py b/pygsti/models/model.py
index f4496788a..6dd0016d8 100644
--- a/pygsti/models/model.py
+++ b/pygsti/models/model.py
@@ -657,6 +657,56 @@ def num_modeltest_params(self):
         self._clean_paramvec()
         return Model.num_modeltest_params.fget(self)
 
+    @property
+    def parameter_labels(self):
+        """
+        A list of labels, usually of the form `(op_label, string_description)` describing this model's parameters.
+        """
+        self._clean_paramvec()
+        return self._ops_paramlbls_to_model_paramlbls(self._paramlbls)
+
+    def set_parameter_label(self, index, label):
+        """
+        Set the label of a single model parameter.
+
+        Parameters
+        ----------
+        index : int
+            The index of the parameter whose label should be set.
+
+        label : object
+            An object that serves to label this parameter. Often a string.
+
+        Returns
+        -------
+        None
+        """
+        self._clean_paramvec()
+        self._paramlbls[index] = label
+
+    @property
+    def parameter_bounds(self):
+        """ Upper and lower bounds on the values of each parameter, utilized by optimization routines """
+        self._clean_paramvec()
+        return self._param_bounds
+
+    @property
+    def num_modeltest_params(self):
+        """
+        The parameter count to use when testing this model against data.
+
+        Oftentimes this is the same as :meth:`num_params`, but there are times
+        when it can be convenient or necessary to use a parameter count different than
+        the actual number of parameters in this model.
+
+        Returns
+        -------
+        int
+            the number of model parameters.
+ """ + self._clean_paramvec() + return Model.num_modeltest_params.fget(self) + def _iter_parameterized_objs(self): raise NotImplementedError("Derived Model classes should implement _iter_parameterized_objs") #return # default is to have no parameterized objects diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index 70ca185cf..0c44fe74c 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -112,9 +112,6 @@ def _objfn(objfn_cls, model, dataset, circuits=None, return ofn - #def __len__(self): - # return len(self.circuits) - class ObjectiveFunctionBuilder(_NicelySerializable): """ @@ -1461,96 +1458,6 @@ def approximate_hessian(self, paramvec=None): """ raise NotImplementedError("Derived classes should implement this!") - #MOVED - but these versions have updated names - #def _persistent_memory_estimate(self, num_elements=None): - # # Estimate & check persistent memory (from allocs within objective function) - # """ - # Compute the amount of memory needed to perform evaluations of this objective function. - # - # This number includes both intermediate and final results, and assumes - # that the types of evauations given by :meth:`_evaltree_subcalls` - # are required. - # - # Parameters - # ---------- - # num_elements : int, optional - # The number of elements (circuit outcomes) that will be computed. - # - # Returns - # ------- - # int - # """ - # if num_elements is None: - # nout = int(round(_np.sqrt(self.mdl.dim))) # estimate of avg number of outcomes per string - # nc = len(self.circuits) - # ne = nc * nout # estimate of the number of elements (e.g. probabilities, # LS terms, etc) to compute - # else: - # ne = num_elements - # np = self.mdl.num_params - # - # # "persistent" memory is that used to store the final results. - # obj_fn_mem = FLOATSIZE * ne - # jac_mem = FLOATSIZE * ne * np - # hess_mem = FLOATSIZE * ne * np**2 - # persistent_mem = 4 * obj_fn_mem + jac_mem # 4 different objective-function sized arrays, 1 jacobian array? - # if any([nm == "bulk_fill_hprobs" for nm in self._evaltree_subcalls()]): - # persistent_mem += hess_mem # we need room for the hessian too! - # # TODO: what about "bulk_hprobs_by_block"? - # - # return persistent_mem - # - #def _evaltree_subcalls(self): - # """ - # The types of calls that will be made to an evaluation tree. - # - # This information is used for memory estimation purposes. - # - # Returns - # ------- - # list - # """ - # calls = ["bulk_fill_probs", "bulk_fill_dprobs"] - # if self.enable_hessian: calls.append("bulk_fill_hprobs") - # return calls - # - #def num_data_params(self): - # """ - # The number of degrees of freedom in the data used by this objective function. 
- # - # Returns - # ------- - # int - # """ - # return self.dataset.degrees_of_freedom(self.ds_circuits, - # aggregate_times=not self.time_dependent) - - #def _precompute_omitted_freqs(self): - # """ - # Detect omitted frequences (assumed to be 0) so we can compute objective fn correctly - # """ - # self.firsts = []; self.indicesOfCircuitsWithOmittedData = [] - # for i, c in enumerate(self.circuits): - # lklen = _slct.length(self.lookup[i]) - # if 0 < lklen < self.mdl.compute_num_outcomes(c): - # self.firsts.append(_slct.to_array(self.lookup[i])[0]) - # self.indicesOfCircuitsWithOmittedData.append(i) - # if len(self.firsts) > 0: - # self.firsts = _np.array(self.firsts, 'i') - # self.indicesOfCircuitsWithOmittedData = _np.array(self.indicesOfCircuitsWithOmittedData, 'i') - # self.dprobs_omitted_rowsum = _np.empty((len(self.firsts), self.nparams), 'd') - # self.raw_objfn.printer.log("SPARSE DATA: %d of %d rows have sparse data" % - # (len(self.firsts), len(self.circuits))) - # else: - # self.firsts = None # no omitted probs - # - #def _compute_count_vectors(self): - # """ - # Ensure self.cache contains count and total-count vectors. - # """ - # if not self.cache.has_count_vectors(): - # self.cache.add_count_vectors(self.dataset, self.ds_circuits, self.circuit_weights) - # return self.cache.counts, self.cache.total_counts - def _construct_hessian(self, counts, total_counts, prob_clip_interval): """ Framework for constructing a hessian matrix row by row using a derived @@ -6506,10 +6413,6 @@ def __init__(self, logl_objective_fn, base_pt, wildcard): self.logl_objfn.resource_alloc.add_tracked_memory(self.logl_objfn.probs.size) self.probs = self.logl_objfn.probs.copy() - #def _default_evalpt(self): - # """The default point to evaluate functions at """ - # return self.wildcard_budget.to_vector() - #Mimic the underlying LogL objective def __getattr__(self, attr): return getattr(self.__dict__['logl_objfn'], attr) # use __dict__ so no chance for recursive __getattr__ diff --git a/pygsti/objectivefns/wildcardbudget.py b/pygsti/objectivefns/wildcardbudget.py index f036b1590..49e62ea53 100644 --- a/pygsti/objectivefns/wildcardbudget.py +++ b/pygsti/objectivefns/wildcardbudget.py @@ -143,11 +143,6 @@ def description(self): """ raise NotImplementedError("Derived classes must implement `description`") - #def compute_circuit_wildcard_budget(c, w_vec): - # #raise NotImplementedError("TODO!!!") - # #for now, assume w_vec is a length-1 vector - # return abs(w_vec[0]) * len(c) - def precompute_for_same_circuits(self, circuits): """ Compute a pre-computed quantity for speeding up circuit calculations. 
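The model.py hunk above introduces a small parameter-labeling API (`parameter_labels`, `set_parameter_label`, `parameter_bounds`, `num_modeltest_params`). Below is a minimal usage sketch, not part of the patch itself: it assumes the patch is applied and uses pyGSTi's stock `smq1Q_XYI` modelpack purely as a convenient way to get a model instance; any `Model`-derived object with these properties would behave the same way, and the "example label" string is illustrative only.

# Sketch only -- exercises the properties added in pygsti/models/model.py above.
from pygsti.modelpacks import smq1Q_XYI      # stock single-qubit modelpack; any pyGSTi model works

mdl = smq1Q_XYI.target_model()

print(mdl.num_params, mdl.num_modeltest_params)   # the two counts often coincide unless overridden
print(mdl.parameter_labels[:3])                   # labels, usually (op_label, description) pairs

mdl.set_parameter_label(0, "example label")       # relabel a single parameter by its index
print(mdl.parameter_bounds)                       # per-parameter bounds consulted by optimization routines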
diff --git a/pygsti/optimize/customcg.py b/pygsti/optimize/customcg.py index de28fb304..a89f70d56 100644 --- a/pygsti/optimize/customcg.py +++ b/pygsti/optimize/customcg.py @@ -244,15 +244,3 @@ def _finite_diff_dfdx_and_bdflag(f, x, delta): #completely undefined return dfdx, bd - -#def f6(param): -# '''Schaffer's F6 function''' -# para = param*10 -# para = param[0:2] -# num = (sin(sqrt((para[0] * para[0]) + (para[1] * para[1])))) * \ -# (sin(sqrt((para[0] * para[0]) + (para[1] * para[1])))) - 0.5 -# denom = (1.0 + 0.001 * ((para[0] * para[0]) + (para[1] * para[1]))) * \ -# (1.0 + 0.001 * ((para[0] * para[0]) + (para[1] * para[1]))) -# f6 = 0.5 - (num/denom) -# errorf6 = 1 - f6 -# return f6, errorf6; diff --git a/pygsti/optimize/customlm.py b/pygsti/optimize/customlm.py index 86ed3dd27..c1cb81d0c 100644 --- a/pygsti/optimize/customlm.py +++ b/pygsti/optimize/customlm.py @@ -810,7 +810,6 @@ def dclip(ar): return ar reject_msg = "" if profiler: profiler.memory_check("custom_leastsq: after linsolve") if success: # linear solve succeeded - #dx = _hack_dx(obj_fn, x, dx, Jac, JTJ, JTf, f, norm_f) if damping_mode != 'adaptive': new_x[:] = x + dx @@ -1230,239 +1229,127 @@ def dclip(ar): return ar #return solution -def _hack_dx(obj_fn, x, dx, jac, jtj, jtf, f, norm_f): - #HACK1 - #if nRejects >= 2: - # dx = -(10.0**(1-nRejects))*x - # print("HACK - setting dx = -%gx!" % 10.0**(1-nRejects)) - # return dx - - #HACK2 - if True: - print("HACK2 - trying to find a good dx by iteratively stepping in each direction...") - - test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f) - print("Compare with suggested step => ", cmp_normf) - STEP = 0.0001 - - #import bpdb; bpdb.set_trace() - #gradient = -jtf - test_dx = _np.zeros(len(dx), 'd') - last_normf = norm_f - for ii in range(len(dx)): - - #Try adding - while True: - test_dx[ii] += STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < last_normf: - last_normf = test_normf +""" +def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6, + rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None): + # + # Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking + # mu/nu => new_mu if acceptable... This didn't seem to perform well, but maybe just + # needs some tweaking, so leaving it commented here for reference + # + msg = "" + converged = False + x = x0 + f = obj_fn(x) + norm_f = _np.linalg.norm(f) + tau = 1e-3 #initial mu + nu = 1.3 + my_cols_slice = None + + + if not _np.isfinite(norm_f): + msg = "Infinite norm of objective function at initial point!" 
+ + for k in range(max_iter): #outer loop + # assume x, f, fnorm hold valid values + + if len(msg) > 0: + break #exit outer loop if an exit-message has been set + + if norm_f < f_norm_tol: + msg = "norm(objectivefn) is small" + converged = True; break + + if verbosity > 0: + print("--- Outer Iter %d: norm_f = %g" % (k,norm_f)) + + if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*") + jac = None; jtj = None; jtf = None + + if profiler: profiler.mem_check("custom_leastsq: begin outer iter") + jac = jac_fn(x) + if profiler: profiler.mem_check("custom_leastsq: after jacobian:" + + "shape=%s, GB=%.2f" % (str(jac.shape), + jac.nbytes/(1024.0**3)) ) + + tm = _time.time() + if my_cols_slice is None: + my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm) + jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm) #_np.dot(jac.T,jac) + jtf = _np.dot(jac.T,f) + if profiler: profiler.add_time("custom_leastsq: dotprods",tm) + + idiag = _np.diag_indices_from(jtj) + norm_JTf = _np.linalg.norm(jtf) #, ord='inf') + norm_x = _np.linalg.norm(x) + undampled_JTJ_diag = jtj.diagonal().copy() + + if norm_JTf < jac_norm_tol: + msg = "norm(jacobian) is small" + converged = True; break + + if k == 0: + mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element + #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element + + #determing increment using adaptive damping + while True: #inner loop + + ### Evaluate with mu' = mu / nu + mu = mu / nu + if profiler: profiler.mem_check("custom_leastsq: begin inner iter") + jtj[idiag] *= (1.0 + mu) # augment normal equations + #jtj[idiag] += mu # augment normal equations + + try: + if profiler: profiler.mem_check("custom_leastsq: before linsolve") + tm = _time.time() + success = True + dx = _np.linalg.solve(jtj, -jtf) + if profiler: profiler.add_time("custom_leastsq: linsolve",tm) + except _np.linalg.LinAlgError: + success = False + + if profiler: profiler.mem_check("custom_leastsq: after linsolve") + if success: #linear solve succeeded + new_x = x + dx + norm_dx = _np.linalg.norm(dx) + + #if verbosity > 1: + # print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx)) + + if norm_dx < rel_tol*norm_x: #use squared qtys instead (speed)? + msg = "relative change in x is small" + converged = True; break + + if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION: + msg = "(near-)singular linear system"; break + + new_f = obj_fn(new_x) + if profiler: profiler.mem_check("custom_leastsq: after obj_fn") + norm_new_f = _np.linalg.norm(new_f) + if not _np.isfinite(norm_new_f): # avoid infinite loop... 
+ msg = "Infinite norm of objective function!"; break + + dF = norm_f - norm_new_f + if dF > 0: #accept step + #print(" Accepted!") + x,f, norm_f = new_x, new_f, norm_new_f + nu = 1.3 + break # exit inner loop normally else: - test_dx[ii] -= STEP - break - - if test_dx[ii] == 0: # then try subtracting - while True: - test_dx[ii] -= STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < last_normf: - last_normf = test_normf - else: - test_dx[ii] += STEP - break - - if abs(test_dx[ii]) > 1e-6: - test_prediction = norm_f + _np.dot(-2 * jtf, test_dx) - tp2_f = f + _np.dot(jac, test_dx) - test_prediction2 = _np.dot(tp2_f, tp2_f) - cmp_dx = dx # -jtf - print(" -> Adjusting index ", ii, ":", x[ii], "+", test_dx[ii], " => ", last_normf, "(cmp w/dx: ", - cmp_dx[ii], test_prediction, test_prediction2, ") ", - "YES" if test_dx[ii] * cmp_dx[ii] > 0 else "NO") - - if _np.linalg.norm(test_dx) > 0 and last_normf < cmp_normf: - print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx)) - return test_dx - else: - print("KEEPING ORIGINAL dx") - - #HACK3 - if False: - print("HACK3 - checking if there's a simple dx that is better...") - test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f) - orig_prediction = norm_f + _np.dot(2 * jtf, dx) - Jdx = _np.dot(jac, dx) - op2_f = f + Jdx - orig_prediction2 = _np.dot(op2_f, op2_f) - # main objective = fT*f = norm_f - # at new x => (f+J*dx)T * (f+J*dx) = norm_f + JdxT*f + fT*Jdx - # = norm_f + 2*(fT*J)dx (b/c transpose of real# does nothing) - # = norm_f + 2*dxT*(JT*f) - # prediction 2 also includes (J*dx)T * (J*dx) term = dxT * (jtj) * dx - orig_prediction3 = orig_prediction + _np.dot(Jdx, Jdx) - norm_dx = _np.linalg.norm(dx) - print("Compare with suggested |dx| = ", norm_dx, " => ", cmp_normf, - "(predicted: ", orig_prediction, orig_prediction2, orig_prediction3) - STEP = norm_dx # 0.0001 - - #import bpdb; bpdb.set_trace() - test_dx = _np.zeros(len(dx), 'd') - best_ii = -1; best_normf = norm_f; best_dx = 0 - for ii in range(len(dx)): - - #Try adding a small amount - test_dx[ii] = STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < best_normf: - best_normf = test_normf - best_dx = STEP - best_ii = ii + mu *= nu #increase mu else: - test_dx[ii] = -STEP - test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f) - if test_normf < best_normf: - best_normf = test_normf - best_dx = -STEP - best_ii = ii - test_dx[ii] = 0 - - test_dx[best_ii] = best_dx - test_prediction = norm_f + _np.dot(2 * jtf, test_dx) - tp2_f = f + _np.dot(jac, test_dx) - test_prediction2 = _np.dot(tp2_f, tp2_f) - - jj = _np.argmax(_np.abs(dx)) - print("Best decrease = index", best_ii, ":", x[best_ii], '+', best_dx, "==>", - best_normf, " (predictions: ", test_prediction, test_prediction2, ")") - print(" compare with original dx[", best_ii, "]=", dx[best_ii], - "YES" if test_dx[best_ii] * dx[best_ii] > 0 else "NO") - print(" max of abs(dx) is index ", jj, ":", dx[jj], "yes" if jj == best_ii else "no") - - if _np.linalg.norm(test_dx) > 0 and best_normf < cmp_normf: - print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx)) - return test_dx - else: - print("KEEPING ORIGINAL dx") - return dx - - -#Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking -# mu/nu => new_mu if acceptable... 
This didn't seem to perform well, but maybe just -# needs some tweaking, so leaving it commented here for reference -#def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6, -# rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None): -# msg = "" -# converged = False -# x = x0 -# f = obj_fn(x) -# norm_f = _np.linalg.norm(f) -# tau = 1e-3 #initial mu -# nu = 1.3 -# my_cols_slice = None -# -# -# if not _np.isfinite(norm_f): -# msg = "Infinite norm of objective function at initial point!" -# -# for k in range(max_iter): #outer loop -# # assume x, f, fnorm hold valid values -# -# if len(msg) > 0: -# break #exit outer loop if an exit-message has been set -# -# if norm_f < f_norm_tol: -# msg = "norm(objectivefn) is small" -# converged = True; break -# -# if verbosity > 0: -# print("--- Outer Iter %d: norm_f = %g" % (k,norm_f)) -# -# if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*") -# jac = None; jtj = None; jtf = None -# -# if profiler: profiler.mem_check("custom_leastsq: begin outer iter") -# jac = jac_fn(x) -# if profiler: profiler.mem_check("custom_leastsq: after jacobian:" -# + "shape=%s, GB=%.2f" % (str(jac.shape), -# jac.nbytes/(1024.0**3)) ) -# -# tm = _time.time() -# if my_cols_slice is None: -# my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm) -# jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm) #_np.dot(jac.T,jac) -# jtf = _np.dot(jac.T,f) -# if profiler: profiler.add_time("custom_leastsq: dotprods",tm) -# -# idiag = _np.diag_indices_from(jtj) -# norm_JTf = _np.linalg.norm(jtf) #, ord='inf') -# norm_x = _np.linalg.norm(x) -# undampled_JTJ_diag = jtj.diagonal().copy() -# -# if norm_JTf < jac_norm_tol: -# msg = "norm(jacobian) is small" -# converged = True; break -# -# if k == 0: -# mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element -# #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element -# -# #determing increment using adaptive damping -# while True: #inner loop -# -# ### Evaluate with mu' = mu / nu -# mu = mu / nu -# if profiler: profiler.mem_check("custom_leastsq: begin inner iter") -# jtj[idiag] *= (1.0 + mu) # augment normal equations -# #jtj[idiag] += mu # augment normal equations -# -# try: -# if profiler: profiler.mem_check("custom_leastsq: before linsolve") -# tm = _time.time() -# success = True -# dx = _np.linalg.solve(jtj, -jtf) -# if profiler: profiler.add_time("custom_leastsq: linsolve",tm) -# except _np.linalg.LinAlgError: -# success = False -# -# if profiler: profiler.mem_check("custom_leastsq: after linsolve") -# if success: #linear solve succeeded -# new_x = x + dx -# norm_dx = _np.linalg.norm(dx) -# -# #if verbosity > 1: -# # print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx)) -# -# if norm_dx < rel_tol*norm_x: #use squared qtys instead (speed)? -# msg = "relative change in x is small" -# converged = True; break -# -# if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION: -# msg = "(near-)singular linear system"; break -# -# new_f = obj_fn(new_x) -# if profiler: profiler.mem_check("custom_leastsq: after obj_fn") -# norm_new_f = _np.linalg.norm(new_f) -# if not _np.isfinite(norm_new_f): # avoid infinite loop... 
-# msg = "Infinite norm of objective function!"; break -# -# dF = norm_f - norm_new_f -# if dF > 0: #accept step -# #print(" Accepted!") -# x,f, norm_f = new_x, new_f, norm_new_f -# nu = 1.3 -# break # exit inner loop normally -# else: -# mu *= nu #increase mu -# else: -# #Linear solve failed: -# mu *= nu #increase mu -# nu = 2*nu -# -# jtj[idiag] = undampled_JTJ_diag #restore diagonal for next inner loop iter -# #end of inner loop -# #end of outer loop -# else: -# #if no break stmt hit, then we've exceeded max_iter -# msg = "Maximum iterations (%d) exceeded" % max_iter -# -# return x, converged, msg + #Linear solve failed: + mu *= nu #increase mu + nu = 2*nu + + jtj[idiag] = undampled_JTJ_diag #restore diagonal for next inner loop iter + #end of inner loop + #end of outer loop + else: + #if no break stmt hit, then we've exceeded max_iter + msg = "Maximum iterations (%d) exceeded" % max_iter + + return x, converged, msg +""" diff --git a/pygsti/optimize/optimize.py b/pygsti/optimize/optimize.py index fcb0835f0..7411eb4d1 100644 --- a/pygsti/optimize/optimize.py +++ b/pygsti/optimize/optimize.py @@ -662,90 +662,6 @@ def _evaluate(individual): return solution -#def fmin_homebrew(f, x0, maxiter): -# """ -# Cooked up by Erik, this algorithm is similar to basinhopping but with some tweaks. -# -# Parameters -# ---------- -# fn : function -# The function to minimize. -# -# x0 : numpy array -# The starting point (argument to fn). -# -# maxiter : int -# The maximum number of iterations. -# -# Returns -# ------- -# scipy.optimize.Result object -# Includes members 'x', 'fun', 'success', and 'message'. -# """ -# -# STEP = 0.01 -# MAX_STEPS = int(2.0 / STEP) # allow a change of at most 2.0 -# MAX_DIR_TRIES = 1000 -# T = 1.0 -# -# global_best_params = cur_x0 = x0 -# global_best = cur_f = f(x0) -# N = len(x0) -# trial_x0 = x0.copy() -# -# for it in range(maxiter): -# -# #Minimize using L-BFGS-B -# opts = {'maxiter': maxiter, 'maxfev': maxiter, 'disp': False } -# soln = _spo.minimize(f,trial_x0,options=opts, method='L-BFGS-B',callback=None, tol=1e-8) -# -# # Update global best -# if soln.fun < global_best: -# global_best_params = soln.x -# global_best = soln.fun -# -# #check if we accept the new minimum -# if soln.fun < cur_f or _np.random.random() < _np.exp( -(soln.fun - cur_f)/T ): -# cur_x0 = soln.x; cur_f = soln.fun -# print "Iter %d: f=%g accepted -- global best = %g" % (it, cur_f, global_best) -# else: -# print "Iter %d: f=%g declined" % (it, cur_f) -# -# trial_x0 = None; numTries = 0 -# while trial_x0 is None and numTries < MAX_DIR_TRIES: -# #choose a random direction -# direction = _np.random.random( N ) -# numTries += 1 -# -# #print "DB: test dir %d" % numTries #DEBUG -# -# #kick solution along random direction until the value of f starts to get smaller again (if it ever does) -# # (this indicates we've gone over a maximum along this direction) -# last_f = cur_f -# for i in range(1,MAX_STEPS): -# test_x = cur_x0 + i*STEP * direction -# test_f = f(test_x) -# #print "DB: test step=%f: f=%f" % (i*STEP, test_f) -# if test_f < last_f: -# trial_x0 = test_x -# print "Found new direction in %d tries, new f(x0) = %g" % (numTries,test_f) -# break -# last_f = test_f -# -# if trial_x0 is None: -# raise ValueError("Maximum number of direction tries exceeded") -# -# solution = _optResult() -# solution.x = global_best_params; solution.fun = global_best -# solution.success = True -## if it < maxiter: -## solution.success = True -## else: -## solution.success = False -## solution.message = "Maximum iterations 
exceeded" -# return solution - - def create_objfn_printer(obj_func, start_time=None): """ Create a callback function that prints the value of an objective function. diff --git a/pygsti/optimize/wildcardopt.py b/pygsti/optimize/wildcardopt.py index 2fc5880d6..f2d794d38 100644 --- a/pygsti/optimize/wildcardopt.py +++ b/pygsti/optimize/wildcardopt.py @@ -67,19 +67,6 @@ def _wildcard_fit_criteria(wv): return max(0, two_dlogl - two_dlogl_threshold) + percircuit_penalty - ##For debugging wildcard (see below for suggested insertion point) - #def _wildcard_fit_criteria_debug(wv): - # dlogl_elements = logl_wildcard_fn.lsvec(wv)**2 # b/c WC fn only has sqrt of terms implemented now - # for i in range(num_circuits): - # dlogl_percircuit[i] = _np.sum(dlogl_elements[layout.indices_for_index(i)], axis=0) - # two_dlogl_percircuit = 2 * dlogl_percircuit - # two_dlogl = sum(two_dlogl_percircuit) - # print("Aggregate penalty = ", two_dlogl, "-", two_dlogl_threshold, "=", two_dlogl - two_dlogl_threshold) - # print("Per-circuit (redbox) penalty = ", sum(_np.clip(two_dlogl_percircuit - redbox_threshold, 0, None))) - # print(" per-circuit threshold = ", redbox_threshold, " highest violators = ") - # sorted_percircuit = sorted(enumerate(two_dlogl_percircuit), key=lambda x: x[1], reverse=True) - # print('\n'.join(["(%d) %s: %g" % (i, layout.circuits[i].str, val) for i, val in sorted_percircuit[0:10]])) - num_iters = 0 wvec_init = budget.to_vector() @@ -541,13 +528,6 @@ def NewtonObjective_derivs(x): Hobj = t * _np.diag(-1.0 / (sqrtVec**3) * (c**2 * x)**2 + c**2 / sqrtVec) + Hbarrier return obj, Dobj, Hobj - #import scipy.optimize - #def barrier_obj(x): - # x = _np.clip(x, 1e-10, None) - # return t * _np.dot(c.T, x) - _np.log(-barrierF(x, False)) - #result = scipy.optimize.minimize(barrier_obj, x, method="CG") - #x = _np.clip(result.x, 0, None) - x, debug_x_list = NewtonSolve(x, NewtonObjective, NewtonObjective_derivs, tol, max_iters, printer - 1) #x, debug_x_list = NewtonSolve(x, NewtonObjective, None, tol, max_iters, printer - 1) # use finite-diff derivs diff --git a/pygsti/protocols/estimate.py b/pygsti/protocols/estimate.py index b478de2a3..897549ae1 100644 --- a/pygsti/protocols/estimate.py +++ b/pygsti/protocols/estimate.py @@ -87,7 +87,6 @@ def from_dir(cls, dirname, quick_load=False): @classmethod def _create_obj_from_doc_and_mongodb(cls, doc, mongodb, quick_load=False): - #def from_mongodb(cls, mongodb_collection, doc_id, ): ret = cls.__new__(cls) _MongoSerializable.__init__(ret, doc.get('_id', None)) ret.__dict__.update(_io.read_auxtree_from_mongodb_doc(mongodb, doc, 'auxfile_types', quick_load=quick_load)) diff --git a/pygsti/protocols/gst.py b/pygsti/protocols/gst.py index 1f373accd..64c015635 100644 --- a/pygsti/protocols/gst.py +++ b/pygsti/protocols/gst.py @@ -1772,11 +1772,6 @@ def __init__(self, modes=('full TP','CPTPLND','Target'), gaugeopt_suite='stdgaug #Advanced options that could be changed by users who know what they're doing self.starting_point = {} # a dict whose keys are modes - #def run_using_germs_and_fiducials(self, dataset, target_model, prep_fiducials, meas_fiducials, germs, max_lengths): - # design = StandardGSTDesign(target_model, prep_fiducials, meas_fiducials, germs, max_lengths) - # data = _proto.ProtocolData(design, dataset) - # return self.run(data) - def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None, disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None): """ diff --git a/pygsti/protocols/modeltest.py 
b/pygsti/protocols/modeltest.py index b29b1b735..d34769624 100644 --- a/pygsti/protocols/modeltest.py +++ b/pygsti/protocols/modeltest.py @@ -128,12 +128,6 @@ def __init__(self, model_to_test, target_model=None, gaugeopt_suite=None, self.circuit_weights = None self.unreliable_ops = ('Gcnot', 'Gcphase', 'Gms', 'Gcn', 'Gcx', 'Gcz') - #def run_using_germs_and_fiducials(self, model, dataset, target_model, prep_fiducials, - # meas_fiducials, germs, maxLengths): - # from .gst import StandardGSTDesign as _StandardGSTDesign - # design = _StandardGSTDesign(target_model, prep_fiducials, meas_fiducials, germs, maxLengths) - # return self.run(_proto.ProtocolData(design, dataset)) - def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=None, disable_checkpointing=False, simulator: Optional[ForwardSimulator.Castable]=None): """ diff --git a/pygsti/protocols/protocol.py b/pygsti/protocols/protocol.py index 28c2459c3..0ef5cd808 100644 --- a/pygsti/protocols/protocol.py +++ b/pygsti/protocols/protocol.py @@ -1464,12 +1464,6 @@ def from_edesign(cls, edesign): else: raise ValueError("Cannot convert a %s to a %s!" % (str(type(edesign)), str(cls))) - #@classmethod - #def from_tensored_circuits(cls, circuits, template_edesign, qubit_labels_per_edesign): - # pass #Useful??? - need to break each circuit into different parts - # based on qubits, then copy (?) template edesign and just replace itself - # all_circuits_needing_data member? - def __init__(self, edesigns, tensored_circuits=None, qubit_labels=None): """ Create a new SimultaneousExperimentDesign object. @@ -1959,9 +1953,6 @@ def is_multipass(self): """ return isinstance(self.dataset, (_data.MultiDataSet, dict)) - #def underlying_tree_paths(self): - # return self.edesign.get_tree_paths() - def prune_tree(self, paths, paths_are_sorted=False): """ Prune the tree rooted here to include only the given paths, discarding all else. diff --git a/pygsti/protocols/vb.py b/pygsti/protocols/vb.py index 95f9d87e4..cecb7bfd5 100644 --- a/pygsti/protocols/vb.py +++ b/pygsti/protocols/vb.py @@ -622,17 +622,6 @@ def _get_circuit_values(icirc, circ, dsrow, idealout): return self._compute_dict(data, self.circuit_statistics, _get_circuit_values, for_passes="first") - # def compute_dscmp_data(self, data, dscomparator): - - # def get_dscmp_values(icirc, circ, dsrow, idealout): - # ret = {'tvds': dscomparator.tvds.get(circ, _np.nan), - # 'pvals': dscomparator.pVals.get(circ, _np.nan), - # 'jsds': dscomparator.jsds.get(circ, _np.nan), - # 'llrs': dscomparator.llrs.get(circ, _np.nan)} - # return ret - - # return self.compute_dict(data, "dscmpdata", self.dsmp_statistics, get_dscmp_values, for_passes="none") - def _compute_predicted_probs(self, data, model): """ Compute the predicted success probabilities of `model` given `data`. @@ -1041,180 +1030,3 @@ def _my_attributes_as_nameddict(self): "SummaryStatisticsResults.statistics dict should be populated with NamedDicts, not %s" % str(type(v)) stats[k] = v return stats - - -#BDB = ByDepthBenchmark -#VBGrid = VolumetricBenchmarkGrid -#VBResults = VolumetricBenchmarkingResults # shorthand - -#Add something like this? -#class PassStabilityTest(_proto.Protocol): -# pass - -# Commented out as we are not using this currently. todo: revive or delete this in the future. -# class VolumetricBenchmarkGrid(Benchmark): -# """ A protocol that creates an entire depth vs. 
width grid of volumetric benchmark values """ - -# def __init__(self, depths='all', widths='all', datatype='success_probabilities', -# paths='all', statistic='mean', aggregate=True, rescaler='auto', -# dscomparator=None, name=None): - -# super().__init__(name) -# self.postproc = VolumetricBenchmarkGridPP(depths, widths, datatype, paths, statistic, aggregate, self.name) -# self.dscomparator = dscomparator -# self.rescaler = rescaler - -# self.auxfile_types['postproc'] = 'protocolobj' -# self.auxfile_types['dscomparator'] = 'pickle' -# self.auxfile_types['rescaler'] = 'reset' # punt for now - fix later - -# def run(self, data, memlimit=None, comm=None): -# #Since we know that VolumetricBenchmark protocol objects Create a single results just fill -# # in data under the result object's 'volumetric_benchmarks' and 'failure_counts' -# # keys, and these are indexed by width and depth (even though each VolumetricBenchmark -# # only contains data for a single width), we can just "merge" the VB results of all -# # the underlying by-depth datas, so long as they're all for different widths. - -# #Then run resulting data normally, giving a results object -# # with "top level" dicts correpsonding to different paths -# VB = ByDepthBenchmark(self.postproc.depths, self.postproc.datatype, self.postproc.statistic, -# self.rescaler, self.dscomparator, name=self.name) -# separate_results = _proto.SimpleRunner(VB).run(data, memlimit, comm) -# pp_results = self.postproc.run(separate_results, memlimit, comm) -# pp_results.protocol = self -# return pp_results - - -# Commented out as we are not using this currently. todo: revive this in the future. -# class VolumetricBenchmark(_proto.ProtocolPostProcessor): -# """ A postprocesor that constructs a volumetric benchmark from existing results. """ - -# def __init__(self, depths='all', widths='all', datatype='polarization', -# statistic='mean', paths='all', edesigntype=None, aggregate=True, -# name=None): - -# super().__init__(name) -# self.depths = depths -# self.widths = widths -# self.datatype = datatype -# self.paths = paths if paths == 'all' else sorted(paths) # need to ensure paths are grouped by common prefix -# self.statistic = statistic -# self.aggregate = aggregate -# self.edesigntype = edesigntype - -# def run(self, results, memlimit=None, comm=None): -# data = results.data -# paths = results.get_tree_paths() if self.paths == 'all' else self.paths -# #Note: above won't work if given just a results object - needs a dir - -# #Process results -# #Merge/flatten the data from different paths into one depth vs width grid -# passnames = list(data.passes.keys()) if data.is_multipass() else [None] -# passresults = [] -# for passname in passnames: -# vb = _tools.NamedDict('Depth', 'int', None, None) -# fails = _tools.NamedDict('Depth', 'int', None, None) -# path_for_gridloc = {} -# for path in paths: -# #TODO: need to be able to filter based on widths... - maybe replace .update calls -# # with something more complicated when width != 'all' -# #print("Aggregating path = ", path) #TODO - show progress something like this later? 
- -# #Traverse path to get to root of VB data -# root = results -# for key in path: -# root = root[key] -# root = root.for_protocol.get(self.name, None) -# if root is None: continue - -# if passname: # then we expect final Results are MultiPassResults -# root = root.passes[passname] # now root should be a BenchmarkingResults -# assert(isinstance(root, VolumetricBenchmarkingResults)) -# if self.edesigntype is None: -# assert(isinstance(root.data.edesign, ByDepthDesign)), \ -# "All paths must lead to by-depth exp. design, not %s!" % str(type(root.data.edesign)) -# else: -# if not isinstance(root.data.edsign, self.edesigntype): -# continue - -# #Get the list of depths we'll extract from this (`root`) sub-results -# depths = root.data.edesign.depths if (self.depths == 'all') else \ -# filter(lambda d: d in self.depths, root.data.edesign.depths) -# width = len(root.data.edesign.qubit_labels) # sub-results contains only a single width -# if self.widths != 'all' and width not in self.widths: continue # skip this one - -# for depth in depths: -# if depth not in vb: # and depth not in fails -# vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float') -# fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None) -# path_for_gridloc[depth] = {} # just used for meaningful error message - -# if width in path_for_gridloc[depth]: -# raise ValueError(("Paths %s and %s both give data for depth=%d, width=%d! Set the `paths`" -# " argument of this VolumetricBenchmarkGrid to avoid this.") % -# (str(path_for_gridloc[depth][width]), str(path), depth, width)) - -# vb[depth][width] = root.volumetric_benchmarks[depth][width] -# fails[depth][width] = root.failure_counts[depth][width] -# path_for_gridloc[depth][width] = path - -# if self.statistic in ('minmin', 'maxmax') and not self.aggregate: -# self._update_vb_minmin_maxmax(vb) # aggregate now since we won't aggregate over passes - -# #Create Results -# results = VolumetricBenchmarkingResults(data, self) -# results.volumetric_benchmarks = vb -# results.failure_counts = fails -# passresults.append(results) - -# agg_fn = _get_statistic_function(self.statistic) - -# if self.aggregate and len(passnames) > 1: # aggregate pass data into a single set of qty dicts -# agg_vb = _tools.NamedDict('Depth', 'int', None, None) -# agg_fails = _tools.NamedDict('Depth', 'int', None, None) -# template = passresults[0].volumetric_benchmarks # to get widths and depths - -# for depth, template_by_width_data in template.items(): -# agg_vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float') -# agg_fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None) - -# for width in template_by_width_data.keys(): -# # ppd = "per pass data" -# vb_ppd = [r.volumetric_benchmarks[depth][width] for r in passresults] -# fail_ppd = [r.failure_counts[depth][width] for r in passresults] - -# successcount = 0 -# failcount = 0 -# for (successcountpass, failcountpass) in fail_ppd: -# successcount += successcountpass -# failcount += failcountpass -# agg_fails[depth][width] = (successcount, failcount) - -# if self.statistic == 'dist': -# agg_vb[depth][width] = [item for sublist in vb_ppd for item in sublist] -# else: -# agg_vb[depth][width] = agg_fn(vb_ppd) - -# aggregated_results = VolumetricBenchmarkingResults(data, self) -# aggregated_results.volumetric_benchmarks = agg_vb -# aggregated_results.failure_counts = agg_fails - -# if self.statistic in ('minmin', 'maxmax'): -# self._update_vb_minmin_maxmax(aggregated_results.qtys['volumetric_benchmarks']) -# return aggregated_results # 
replace per-pass results with aggregated results -# elif len(passnames) > 1: -# multipass_results = _proto.MultiPassResults(data, self) -# multipass_results.passes.update({passname: r for passname, r in zip(passnames, passresults)}) -# return multipass_results -# else: -# return passresults[0] - -# def _update_vb_minmin_maxmax(self, vb): -# for d in vb.keys(): -# for w in vb[d].keys(): -# for d2 in vb.keys(): -# for w2 in vb[d2].keys(): -# if self.statistic == 'minmin' and d2 <= d and w2 <= w and vb[d2][w2] < vb[d][w]: -# vb[d][w] = vb[d2][w2] -# if self.statistic == 'maxmax' and d2 >= d and w2 >= w and vb[d2][w2] > vb[d][w]: -# vb[d][w] = vb[d2][w2] diff --git a/pygsti/report/factory.py b/pygsti/report/factory.py index 7d97fcd11..8d2f675d7 100644 --- a/pygsti/report/factory.py +++ b/pygsti/report/factory.py @@ -84,21 +84,6 @@ def _add_lbl(lst, lbl): return running_lbls -#def _robust_estimate_has_same_models(estimates, est_lbl): -# lbl_robust = est_lbl+ROBUST_SUFFIX -# if lbl_robust not in estimates: return False #no robust estimate -# -# for mdl_lbl in list(estimates[est_lbl].goparameters.keys()) \ -# + ['final iteration estimate']: -# if mdl_lbl not in estimates[lbl_robust].models: -# return False #robust estimate is missing mdl_lbl! -# -# mdl = estimates[lbl_robust].models[mdl_lbl] -# if estimates[est_lbl].models[mdl_lbl].frobeniusdist(mdl) > 1e-8: -# return False #model mismatch! -# -# return True - def _get_viewable_crf(est, est_lbl, mdl_lbl, verbosity=0): printer = _VerbosityPrinter.create_printer(verbosity) diff --git a/pygsti/report/fogidiagram.py b/pygsti/report/fogidiagram.py index a53b1681a..a93486fdd 100644 --- a/pygsti/report/fogidiagram.py +++ b/pygsti/report/fogidiagram.py @@ -379,10 +379,6 @@ def __init__(self, fogi_stores, op_coefficients, model_dim, op_to_target_qubits= def _normalize(self, v): return -_np.log10(max(v, 10**(-self.MAX_POWER)) * 10**self.MIN_POWER) / (self.MAX_POWER - self.MIN_POWER) - #def _normalize(v): - # v = min(max(v, 10**(-MAX_POWER)), 10**(-MIN_POWER)) - # return 1.0 - v / (10**(-MIN_POWER) - 10**(-MAX_POWER)) - def _node_HScolor(self, Hvalue, Svalue): r, g, b, a = _Hcmap(self._normalize(Hvalue)) r2, g2, b2, a2 = _Scmap(self._normalize(Svalue)) @@ -622,18 +618,6 @@ def _render_drawing(self, drawing, filename): if filename: d.saveSvg(filename) return d - #def _draw_node_simple(self, drawing, r, theta, coh, sto, op_label, total, val_max): - # nodes = drawing.nodes - # back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total) - # x, y = r * _np.cos(theta), r * _np.sin(theta) - # scale = (coh + sto) / val_max - # node_width = 20 + 40 * scale - # node_height = 20 + 40 * scale - # nodes.append(_draw.Rectangle(x - node_width / 2, y - node_height / 2, node_width, node_height, rx=3, - # fill=back_color, stroke=border_color, stroke_width=2)) - # nodes.append(_draw.Text(labels, self.node_fontsize * (0.5 + scale), x, y, fill=tcolor, - # text_anchor="middle", valign='middle', font_family='Times')) - def _draw_node(self, drawing, r, theta, coh, sto, op_label, total, val_max, groupid, info): nodes = drawing.nodes back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total) diff --git a/pygsti/report/reportables.py b/pygsti/report/reportables.py index 933da26c0..e4fa68146 100644 --- a/pygsti/report/reportables.py +++ b/pygsti/report/reportables.py @@ -332,13 +332,6 @@ def evaluate_nearby(self, nearby_model): # ref for eigenvalue derivatives: 
https://www.win.tue.nl/casa/meetings/seminar/previous/_abstract051019_files/Presentation.pdf # noqa -#def circuit_eigenvalues(model, circuit): -# return _np.array(sorted(_np.linalg.eigvals(model.sim.product(circuit)), -# key=lambda ev: abs(ev), reverse=True)) -#CircuitEigenvalues = _modf.modelfn_factory(circuit_eigenvalues) -## init args == (model, circuit) - - def rel_circuit_eigenvalues(model_a, model_b, circuit): """ Eigenvalues of dot(productB(circuit)^-1, productA(circuit)) @@ -543,13 +536,6 @@ def evaluate_nearby(self, nearby_model): val = 0.5 * (_np.vdot(J.real, self.W.real) + _np.vdot(J.imag, self.W.imag)) return val - #def circuit_half_diamond_norm(model_a, model_b, circuit): - # A = model_a.sim.product(circuit) # "gate" - # B = model_b.sim.product(circuit) # "target gate" - # return half_diamond_norm(A, B, model_b.basis) - #CircuitHalfDiamondNorm = _modf.modelfn_factory(circuit_half_diamond_norm) - # # init args == (model_a, model_b, circuit) - else: circuit_half_diamond_norm = None CircuitHalfDiamondNorm = _null_fn diff --git a/pygsti/report/workspace.py b/pygsti/report/workspace.py index e0f90d1be..a1016ea02 100644 --- a/pygsti/report/workspace.py +++ b/pygsti/report/workspace.py @@ -1444,13 +1444,8 @@ def __getattr__(self, attr): #use __dict__ so no chance for recursive __getattr__ return getattr(self.__dict__['base'], attr) - def __len__(self): return len(self.base) - #Future - arithmetic ops should return a new SwitchValue - #def __add__(self,x): return self.base + x - #def __sub__(self,x): return self.base - x - #def __mul__(self,x): return self.base * x - #def __truediv__(self, x): return self.base / x - + def __len__(self): + return len(self.base) class WorkspaceOutput(object): """ @@ -2516,13 +2511,6 @@ def render(self, typ="html", id=None): plotID = "plot_" + id if typ == "html": - - #def getPlotlyDivID(html): - # #could make this more robust using lxml or something later... - # iStart = html.index('div id="') - # iEnd = html.index('"', iStart+8) - # return html[iStart+8:iEnd] - ##pick "master" plot, whose resizing dictates the resizing of other plots, ## as the largest-height plot. #iMaster = None; maxH = 0; diff --git a/pygsti/report/workspaceplots.py b/pygsti/report/workspaceplots.py index b285f2845..ff1af4ed4 100644 --- a/pygsti/report/workspaceplots.py +++ b/pygsti/report/workspaceplots.py @@ -1703,19 +1703,6 @@ def _create(self, plottypes, circuits, dataset, model, prec, sum_up, box_labels, dataset = mdc_store.dataset model = mdc_store.model - #DEBUG: for checking - #def _addl_mx_fn_chk(plaq,x,y): - # gsplaq_ds = plaq.expand_aliases(dataset) - # spamlabels = model.get_spam_labels() - # cntMxs = _ph.total_count_matrix( gsplaq_ds, dataset)[None,:,:] - # probMxs = _ph.probability_matrices( plaq, model, spamlabels, - # probs_precomp_dict) - # freqMxs = _ph.frequency_matrices( gsplaq_ds, dataset, spamlabels) - # logLMxs = _tools.two_delta_logl_term( cntMxs, probMxs, freqMxs, 1e-4) - # return logLMxs.sum(axis=0) # sum over spam labels - - # End "Additional sub-matrix" functions - if not isinstance(plottypes, (list, tuple)): plottypes = [plottypes] @@ -2178,12 +2165,6 @@ def _mx_fn_driftpv(plaq, x, y, instabilityanalyzertuple): def _mx_fn_drifttvd(plaq, x, y, instabilityanalyzertuple): return _ph.drift_maxtvd_matrices(plaq, instabilityanalyzertuple) -# future: delete this, or update it and added it back in. 
-# def _mx_fn_driftpwr(plaq, x, y, driftresults): -# return _ph.drift_maxpower_matrices(plaq, driftresults) - -# Begin "Additional sub-matrix" functions for adding more info to hover text - def _outcome_to_str(x): # same function as in writers.py if isinstance(x, str): return x @@ -3955,411 +3936,3 @@ def _create(self, rb_r, fitkey, decay, success_probabilities, ylim, xlim, #reverse order of data so z-ordering is nicer return ReportFigure(go.Figure(data=list(data), layout=layout), None, pythonVal) - - -#This older version on an RB decay plot contained a lot more theory detail -# compared with the current one - so we'll keep it around (commented out) -# in case we want to steal/revive pieces of it in the future. -#class OLDRandomizedBenchmarkingPlot(WorkspacePlot): -# """ Plot of RB Decay curve """ -# def __init__(self, ws, rb_r,xlim=None, ylim=None, -# fit='standard', Magesan_zeroth=False, Magesan_first=False, -# exact_decay=False,L_matrix_decay=False, Magesan_zeroth_SEB=False, -# Magesan_first_SEB=False, L_matrix_decay_SEB=False,mdl=False, -# target_model=False,group=False, group_to_model=None, norm='1to1', legend=True, -# title='Randomized Benchmarking Decay', scale=1.0): -# """ -# Plot RB decay curve, as a function of some the sequence length -# computed using the `gstyp` gate-label-set. -# -# Parameters -# ---------- -# rb_r : RBResults -# The RB results object containing all the relevant RB data. -# -# gstyp : str, optional -# The gate-label-set specifying which translation (i.e. strings with -# which operation labels) to use when computing sequence lengths. -# -# xlim : tuple, optional -# The x-range as (xmin,xmax). -# -# ylim : tuple, optional -# The y-range as (ymin,ymax). -# -# save_fig_path : str, optional -# If not None, the filename where the resulting plot should be saved. -# -# fitting : str, optional -# Allowed values are 'standard', 'first order' or 'all'. Specifies -# whether the zeroth or first order fitting model results are plotted, -# or both. -# -# Magesan_zeroth : bool, optional -# If True, plots the decay predicted by the 'zeroth order' theory of Magesan -# et al. PRA 85 042311 2012. Requires mdl and target_model to be specified. -# -# Magesan_first : bool, optional -# If True, plots the decay predicted by the 'first order' theory of Magesan -# et al. PRA 85 042311 2012. Requires mdl and target_model to be specified. -# -# Magesan_zeroth_SEB : bool, optional -# If True, plots the systematic error bound for the 'zeroth order' theory -# predicted decay. This is the region around the zeroth order decay in which -# the exact RB average survival probabilities are guaranteed to fall. -# -# Magesan_first_SEB : bool, optional -# As above, but for 'first order' theory. -# -# exact_decay : bool, optional -# If True, plots the exact RB decay, as predicted by the 'R matrix' theory -# of arXiv:1702.01853. Requires mdl and group to be specified -# -# L_matrix_decay : bool, optional -# If True, plots the RB decay, as predicted by the approximate 'L matrix' -# theory of arXiv:1702.01853. Requires mdl and target_model to be specified. -# -# L_matrix_decay_SEB : bool, optional -# If True, plots the systematic error bound for approximate 'L matrix' -# theory of arXiv:1702.01853. This is the region around predicted decay -# in which the exact RB average survival probabilities are guaranteed -# to fall. -# -# mdl : model, optional -# Required, if plotting any of the theory decays. The model for which -# these decays should be plotted for. 
-# -# target_model : Model, optional -# Required, if plotting certain theory decays. The target model for which -# these decays should be plotted for. -# -# group : MatrixGroup, optional -# Required, if plotting R matrix theory decay. The matrix group that mdl -# is an implementation of. -# -# group_to_model : dict, optional -# If not None, a dictionary that maps labels of group elements to labels -# of mdl. Only used if subset_sampling is not None. If subset_sampling is -# not None and the mdl and group elements have the same labels, this dictionary -# is not required. Otherwise it is necessary. -# -# norm : str, optional -# The norm used for calculating the Magesan theory bounds. -# -# legend : bool, optional -# Specifies whether a legend is added to the graph -# -# title : str, optional -# Specifies a title for the graph -# -# Returns -# ------- -# None -# """ -# # loc : str, optional -# # Specifies the location of the legend. -# super(RandomizedBenchmarkingPlot,self).__init__( -# ws, self._create, rb_r, xlim, ylim, fit, Magesan_zeroth, -# Magesan_first, exact_decay, L_matrix_decay, Magesan_zeroth_SEB, -# Magesan_first_SEB, L_matrix_decay_SEB, mdl, target_model, group, -# group_to_model, norm, legend, title, scale) -# -# def _create(self, rb_r, xlim, ylim, fit, Magesan_zeroth, -# Magesan_first, exact_decay, L_matrix_decay, Magesan_zeroth_SEB, -# Magesan_first_SEB, L_matrix_decay_SEB, mdl, target_model, group, -# group_to_model, norm, legend, title, scale): -# -# from ..extras.rb import rbutils as _rbutils -# #TODO: maybe move the computational/fitting part of this function -# # back to the RBResults object to reduce the logic (and dependence -# # on rbutils) here. -# -# #newplot = _plt.figure(figsize=(8, 4)) -# #newplotgca = newplot.gca() -# -# # Note: minus one to get xdata that discounts final Clifford-inverse -# xdata = _np.asarray(rb_r.results['lengths']) - 1 -# ydata = _np.asarray(rb_r.results['successes']) -# A = rb_r.results['A'] -# B = rb_r.results['B'] -# f = rb_r.results['f'] -# if fit == 'first order': -# C = rb_r.results['C'] -# pre_avg = rb_r.pre_avg -# -# if (Magesan_zeroth_SEB is True) and (Magesan_zeroth is False): -# print("As Magesan_zeroth_SEB is True, Setting Magesan_zeroth to True\n") -# Magesan_zeroth = True -# if (Magesan_first_SEB is True) and (Magesan_first is False): -# print("As Magesan_first_SEB is True, Setting Magesan_first to True\n") -# Magesan_first = True -# -# if (Magesan_zeroth is True) or (Magesan_first is True): -# if (mdl is False) or (target_model is False): -# raise ValueError("To plot Magesan et al theory decay curves a model" + -# " and a target model is required.") -# else: -# MTP = _rbutils.Magesan_theory_parameters(mdl, target_model, -# success_outcomelabel=rb_r.success_outcomelabel, -# norm=norm,d=rb_r.d) -# f_an = MTP['p'] -# A_an = MTP['A'] -# B_an = MTP['B'] -# A1_an = MTP['A1'] -# B1_an = MTP['B1'] -# C1_an = MTP['C1'] -# delta = MTP['delta'] -# -# if exact_decay is True: -# if (mdl is False) or (group is False): -# raise ValueError("To plot the exact decay curve a model" + -# " and the target group are required.") -# else: -# mvalues,ASPs = _rbutils.exact_rb_asps(mdl,group,max(xdata),m_min=1,m_step=1, -# d=rb_r.d, group_to_model=group_to_model, -# success_outcomelabel=rb_r.success_outcomelabel) -# -# if L_matrix_decay is True: -# if (mdl is False) or (target_model is False): -# raise ValueError("To plot the L matrix theory decay curve a model" + -# " and a target model is required.") -# else: -# mvalues, LM_ASPs, LM_ASPs_SEB_lower, 
LM_ASPs_SEB_upper = \ -# _rbutils.L_matrix_asps(mdl,target_model,max(xdata),m_min=1,m_step=1,d=rb_r.d, -# success_outcomelabel=rb_r.success_outcomelabel, error_bounds=True) -# -# xlabel = 'Sequence length' -# -# data = [] # list of traces -# data.append( go.Scatter( -# x = xdata, y = ydata, -# mode = 'markers', -# marker = dict( -# color = "rgb(0,0,0)", -# size = 6 if pre_avg else 3 -# ), -# name = 'Averaged RB data' if pre_avg else 'RB data', -# )) -# -# if fit=='standard' or fit=='first order': -# fit_label_1='Fit' -# fit_label_2='Fit' -# color2 = "black" -# -# theory_color2 = "green" -# theory_fill2 = "rgba(0,128,0,0.1)" -# if Magesan_zeroth is True and Magesan_first is True: -# theory_color2 = "magenta" -# theory_fill2 = "rgba(255,0,255,0.1)" -# -# if fit=='standard': -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.standard_fit_function(_np.arange(max(xdata)),A,B,f), -# mode = 'lines', -# line = dict(width=1, color="black"), -# name = fit_label_1, -# showlegend=legend, -# )) -# -# if fit=='first order': -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.first_order_fit_function(_np.arange(max(xdata)),A,B,C,f), -# mode = 'lines', -# line = dict(width=1, color=color2), -# name = fit_label_2, -# showlegend=legend, -# )) -# -# if Magesan_zeroth is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# mode = 'lines', -# line = dict(width=2, color="green", dash='dash'), -# name = '0th order theory', -# showlegend=legend, -# )) -# -# if Magesan_zeroth_SEB is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_upper( -# _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# _np.arange(max(xdata)), delta, order='zeroth'), -# mode = 'lines', -# line = dict(width=0.5, color="green"), -# name = '0th order bound', -# fill='tonexty', -# fillcolor='rgba(0,128,0,0.1)', -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_lower( -# _rbutils.standard_fit_function(_np.arange(max(xdata)),A_an,B_an,f_an), -# _np.arange(max(xdata)), delta, order='zeroth'), -# mode = 'lines', -# line = dict(width=0.5, color="green"), -# name = '0th order bound', -# showlegend=False, -# )) -# -# -# if Magesan_first is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# mode = 'lines', -# line = dict(width=2, color=theory_color2, dash='dash'), -# name = '1st order theory', -# showlegend=legend, -# )) -# -# if Magesan_first_SEB is True: -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_upper( -# _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# _np.arange(max(xdata)), delta, order='first'), -# mode = 'lines', -# line = dict(width=0.5, color=theory_color2), #linewidth=4? 
-# name = '1st order bound', -# fill='tonexty', -# fillcolor=theory_fill2, -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = _np.arange(max(xdata)), -# y = _rbutils.seb_lower( -# _rbutils.first_order_fit_function(_np.arange(max(xdata)),A1_an,B1_an,C1_an,f_an), -# _np.arange(max(xdata)), delta, order='first'), -# mode = 'lines', -# line = dict(width=0.5, color=theory_color2), -# name = '1st order bound', -# showlegend=False, -# )) -# -# -# if exact_decay is True: -# data.append( go.Scatter( -# x = mvalues, -# y = ASPs, -# mode = 'lines', -# line = dict(width=2, color="blue",dash='dash'), -# name = 'Exact decay', -# showlegend=legend, -# )) -# -# if L_matrix_decay is True: -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs, -# mode = 'lines', -# line = dict(width=2, color="cyan",dash='dash'), -# name = 'L matrix decay', -# showlegend=legend, -# )) -# if L_matrix_decay_SEB is True: -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs_SEB_upper, -# mode = 'lines', -# line = dict(width=0.5, color="cyan"), -# name = 'LM bound', -# fill='tonexty', -# fillcolor='rgba(0,255,255,0.1)', -# showlegend=False, -# )) -# data.append( go.Scatter( -# x = mvalues, -# y = LM_ASPs_SEB_lower, -# mode = 'lines', -# line = dict(width=0.5, color="cyan"), -# name = 'LM bound', -# showlegend=False, -# )) -# -# ymin = min([min(trace['y']) for trace in data]) -# ymin -= 0.1*abs(1.0-ymin) #pad by 10% -# -# layout = go.Layout( -# width=800*scale, -# height=400*scale, -# title=title, -# titlefont=dict(size=16), -# xaxis=dict( -# title=xlabel, -# titlefont=dict(size=14), -# range=xlim if xlim else [0,max(xdata)], -# ), -# yaxis=dict( -# title='Mean survival probability', -# titlefont=dict(size=14), -# range=ylim if ylim else [ymin,1.0], -# ), -# legend=dict( -# font=dict( -# size=13, -# ), -# ) -# ) -# -# pythonVal = {} -# for i,tr in enumerate(data): -# key = tr['name'] if ("name" in tr) else "trace%d" % i -# pythonVal[key] = {'x': tr['x'], 'y': tr['y']} -# -# #reverse order of data so z-ordering is nicer -# return ReportFigure(go.Figure(data=list(reversed(data)), layout=layout), -# None, pythonVal) -# -# #newplotgca.set_xlabel(xlabel, fontsize=15) -# #newplotgca.set_ylabel('Mean survival probability',fontsize=15) -# #if title==True: -# # newplotgca.set_title('Randomized Benchmarking Decay', fontsize=18) -# #newplotgca.set_frame_on(True) -# #newplotgca.yaxis.grid(False) -# #newplotgca.tick_params(axis='x', top='off', labelsize=12) -# #newplotgca.tick_params(axis='y', left='off', right='off', labelsize=12) -# -# #if legend==True: -# # leg = _plt.legend(fancybox=True, loc=loc) -# # leg.get_frame().set_alpha(0.9) -# -# #newplotgca.spines["top"].set_visible(False) -# #newplotgca.spines["right"].set_visible(False) -# #newplotgca.spines["bottom"].set_alpha(.7) -# #newplotgca.spines["left"].set_alpha(.7) - - -#Histograms?? -#TODO: histogram -# if histogram: -# fig = _plt.figure() -# histdata = subMxSums.flatten() -# #take gives back (1,N) shaped array (why?) 
-# histdata_finite = _np.take(histdata, _np.where(_np.isfinite(histdata)))[0] -# histMin = min( histdata_finite ) if cmapFactory.vmin is None else cmapFactory.vmin -# histMax = max( histdata_finite ) if cmapFactory.vmax is None else cmapFactory.vmax -# _plt.hist(_np.clip(histdata_finite,histMin,histMax), histBins, -# range=[histMin, histMax], facecolor='gray', align='mid') -# if save_to is not None: -# if len(save_to) > 0: -# _plt.savefig( _makeHistFilename(save_to) ) -# _plt.close(fig) - -# if histogram: -# fig = _plt.figure() -# histdata = _np.concatenate( [ sub_mxs[iy][ix].flatten() for ix in range(nXs) for iy in range(nYs)] ) -# #take gives back (1,N) shaped array (why?) -# histdata_finite = _np.take(histdata, _np.where(_np.isfinite(histdata)))[0] -# histMin = min( histdata_finite ) if cmapFactory.vmin is None else cmapFactory.vmin -# histMax = max( histdata_finite ) if cmapFactory.vmax is None else cmapFactory.vmax -# _plt.hist(_np.clip(histdata_finite,histMin,histMax), histBins, -# range=[histMin, histMax], facecolor='gray', align='mid') -# if save_to is not None: -# if len(save_to) > 0: -# _plt.savefig( _makeHistFilename(save_to) ) -# _plt.close(fig) diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index b87c59f67..2ec896cd5 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -201,37 +201,6 @@ def change_basis(mx, from_basis, to_basis): (_mt.safe_norm(ret, 'imag'), from_basis, to_basis, ret)) return ret.real -#def transform_matrix(from_basis, to_basis, dim_or_block_dims=None, sparse=False): -# ''' -# Compute the transformation matrix between two bases -# -# Parameters -# ---------- -# from_basis : Basis or str -# Basis being converted from -# -# to_basis : Basis or str -# Basis being converted to -# -# dim_or_block_dims : int or list of ints -# if strings provided as bases, the dimension of basis to use. -# -# sparse : bool, optional -# Whether to construct a sparse or dense transform matrix -# when this isn't specified already by `from_basis` or -# `to_basis` (e.g. when these are both strings). -# -# Returns -# ------- -# Basis -# the composite basis created -# ''' -# if dim_or_block_dims is None: -# assert isinstance(from_basis, Basis) -# else: -# from_basis = Basis(from_basis, dim_or_block_dims, sparse=sparse) -# return from_basis.transform_matrix(to_basis) - def create_basis_pair(mx, from_basis, to_basis): """ diff --git a/pygsti/tools/listtools.py b/pygsti/tools/listtools.py index 67de5be94..0beb2749c 100644 --- a/pygsti/tools/listtools.py +++ b/pygsti/tools/listtools.py @@ -383,117 +383,3 @@ def lists_to_tuples(obj): return {lists_to_tuples(k): lists_to_tuples(v) for k, v in obj.items()} else: return obj - - -# ------------------------------------------------------------------------------ -# Machinery initially designed for an in-place take operation, which computes -# how to do in-place permutations of arrays/lists efficiently. Kept here -# commented out in case this is needed some time in the future. -# ------------------------------------------------------------------------------ -# -#def build_permute_copy_order(indices): -# #Construct a list of the operations needed to "take" indices -# # out of an array. -# -# nIndices = len(indices) -# flgs = _np.zeros(nIndices,'bool') #flags indicating an index has been processed -# shelved = {} -# copyList = [] -# -# while True: #loop until we've processed everything -# -# #The cycle has ended. 
Now find an unprocessed -# # destination to begin a new cycle -# for i in range(nIndices): -# if flgs[i] == False: -# if indices[i] == i: # index i is already where it need to be! -# flgs[i] = True -# else: -# cycleFirstIndex = iDest = i -# if cycleFirstIndex in indices: -# copyList.append( (-1,i) ) # iDest == -1 means copy to offline storage -# break; -# else: -# break #everything has been processed -- we're done! -# -# while True: # loop over cycle -# -# # at this point, data for index iDest has been stored or copied -# iSrc = indices[iDest] # get source index for current destination -# -# # record appropriate copy command -# if iSrc == cycleFirstIndex: -# copyList.append( (iDest, -1) ) # copy from offline storage -# flgs[iDest] = True -# -# #end of this cycle since we've hit our starting point, -# # but no need to shelve first cycle element in this case. -# break #(end of cycle) -# else: -# if iSrc in shelved: #original iSrc is now at index shelved[iSrc] -# iSrc = shelved[iSrc] -# -# copyList.append( (iDest,iSrc) ) # => copy src -> dest -# flgs[iDest] = True -# -# if iSrc < nIndices: -# #Continue cycle (swapping within "active" (index < nIndices) region) -# iDest = iSrc # make src the new dest -# else: -# #end of this cycle, and first cycle index hasn't been -# # used, so shelve it (store it for later use) if it -# # will be needed in the future. -# if cycleFirstIndex in indices: -# copyList.append( (iSrc,-1) ) -# shelved[cycleFirstIndex] = iSrc -# -# break #(end of cycle) -# -# return copyList -# -## X X X -## 0 1 2 3 (nIndices == 4) -## 3, 0, 7, 4 -## store 0 -## 3 -> 0 -## 4 -> 3 -## stored[0] -> 4, shelved[0] = 4 -## store 1 -## shelved[0]==4 -> 1, NO((stored[1] -> 4, shelved[1] = 4)) B/C don't need index 1 -## store 2 -## 7 -> 2 -## NO((Stored[2] -> 7, istore[2] = 7)) -# -# -#def inplace_take(a, indices, axis=None, copyList=None): -# check = a.take(indices, axis=axis) #DEBUGGING -# return check #FIX FOR NOW = COPY -# -# if axis is None: -# def mkindex(i): -# return i -# else: -# def mkindex(i): -# sl = [slice(None)] * a.ndim -# sl[axis] = i -# return sl -# -# if copyList is None: -# copyList = build_permute_copy_order(indices) -# -# store = None -# for iDest,iSrc in copyList: -# if iDest == -1: store = a[mkindex(iSrc)].copy() #otherwise just get a view! -# elif iSrc == -1: a[mkindex(iDest)] = store -# else: a[mkindex(iDest)] = a[mkindex(iSrc)] -# -# ret = a[mkindex(slice(0,len(indices)))] -# if _np.linalg.norm(ret-check) > 1e-8 : -# print("ERROR CHECK FAILED") -# print("ret = ",ret) -# print("check = ",check) -# print("diff = ",_np.linalg.norm(ret-check)) -# assert(False) -# #check = None #free mem? -# #return ret -# return check diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index 77a826c97..94940c45c 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -1833,23 +1833,6 @@ def _custom_expm_multiply_simple_core(a, b, mu, m_star, s, tol, eta): # t == 1. return F -#From SciPy source, as a reference - above we assume A is a sparse csr matrix -# and B is a dense vector -#def _exact_inf_norm(A): -# # A compatibility function which should eventually disappear. -# if scipy.sparse.isspmatrix(A): -# return max(abs(A).sum(axis=1).flat) -# else: -# return np.linalg.norm(A, np.inf) -# -# -#def _exact_1_norm(A): -# # A compatibility function which should eventually disappear. 
-# if scipy.sparse.isspmatrix(A): -# return max(abs(A).sum(axis=0).flat) -# else: -# return np.linalg.norm(A, 1) - def expop_multiply_prep(op, a_1_norm=None, tol=EXPM_DEFAULT_TOL): """ Returns "prepared" meta-info about operation op, which is assumed to be traceless (so no shift is needed). @@ -2216,15 +2199,6 @@ def union_space(space1, space2, tol=1e-7): return VW[:, indep_cols] -#UNUSED -#def spectral_radius(x): -# if hasattr(x, 'ndim') and x.ndim == 2: # then interpret as a numpy array and take norm -# evals = _np.sort(_np.linalg.eigvals(x)) -# return abs(evals[-1] - evals[0]) -# else: -# return x - - def jamiolkowski_angle(hamiltonian_mx): """ TODO: docstring diff --git a/pygsti/tools/pdftools.py b/pygsti/tools/pdftools.py index c73e27dc4..57a3e65de 100644 --- a/pygsti/tools/pdftools.py +++ b/pygsti/tools/pdftools.py @@ -71,28 +71,3 @@ def classical_fidelity(p, q): # sqrt_fidelity += _np.sqrt(x * y) return _np.sum([_np.sqrt(x * q.get(event, 0.)) for (event, x) in p.items()]) ** 2 - - #return root_fidelity ** 2 - - -# def Hoyer_sparsity_measure(p, n): -# """ -# Computes a measure of the sparsity ("spikyness") of a probability distribution (or a -# general real vector). - -# Parameters -# ---------- -# p : dict -# The distribution - -# n : the number of possible events (zero probability events do not need to be included in `p`) - -# Returns -# ------- -# float -# """ -# plist = _np.array(list(p.values())) -# twonorm = _np.sqrt(_np.sum(plist**2)) -# onenorm = _np.sum(_np.abs(plist)) -# max_onenorm_over_twonorm = _np.sqrt(n) -# return (max_onenorm_over_twonorm - onenorm/twonorm) / (max_onenorm_over_twonorm - 1) diff --git a/pygsti/tools/rbtheory.py b/pygsti/tools/rbtheory.py index 79e23f06c..7191bbb03 100644 --- a/pygsti/tools/rbtheory.py +++ b/pygsti/tools/rbtheory.py @@ -458,255 +458,6 @@ def R_matrix(model, group, group_to_model=None, weights=None): # noqa N802 return R -### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst". -### THIS SHOULD BE ADDED BACK IN AT SOME POINT. -# def exact_rb_asps(model, group, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), -# group_to_model=None, weights=None, compilation=None, group_twirled=False): -# """ -# Calculates the exact RB average success probablilites (ASP). - -# Uses some generalizations of the formula given Proctor et al -# Phys. Rev. Lett. 119, 130502 (2017). This formula does not scale well with -# group size and qubit number, and for the Clifford group it is likely only -# practical for a single qubit. - -# Parameters -# ---------- -# model : Model -# The noisy model (e.g., the Cliffords) to calculate the R matrix of. -# The correpsonding `target` model (not required in this function) -# must be equal to or a subset of (a faithful rep of) the group `group`. -# If group_to_model is None, the labels of the gates in model should be -# the same as the labels of the corresponding group elements in `group`. -# For Clifford RB `model` should be the clifford model; for direct RB -# this should be the native model. - -# group : MatrixGroup -# The group that the `model` model contains gates from. For Clifford RB -# or direct RB, this would be the Clifford group. - -# m_max : int -# The maximal sequence length of the random gates, not including the -# inversion gate. - -# m_min : int, optional -# The minimal sequence length. Defaults to the smallest valid value of 0. - -# m_step : int, optional -# The step size between sequence lengths. Defaults to the smallest valid -# value of 1. 
- -# success_outcomelabel : str or tuple, optional -# The outcome label associated with success. - -# group_to_model : dict, optional -# If not None, a dictionary that maps labels of group elements to labels -# of model. This is required if the labels of the gates in `model` are different -# from the labels of the corresponding group elements in `group`. - -# weights : dict, optional -# If not None, a dictionary of floats, whereby the keys are the gates in model -# and the values are the unnormalized probabilities to apply each gate at -# for each layer of the RB protocol. If None, the weighting defaults to an -# equal weighting on all gates, as used in most RB protocols (e.g., Clifford -# RB). - -# compilation : dict, optional -# If `model` is not the full group `group` (with the same labels), then a -# compilation for the group elements, used to implement the inversion gate -# (and the initial randomgroup element, if `group_twirled` is True). This -# is a dictionary with the group labels as keys and a gate sequence of the -# elements of `model` as values. - -# group_twirled : bool, optional -# If True, the random sequence starts with a single uniformly random group -# element before the m random elements of `model`. - -# Returns -# ------- -# m : float -# Array of sequence length values that the ASPs have been calculated for. - -# P_m : float -# Array containing ASP values for the specified sequence length values. -# """ -# if compilation is None: -# for key in list(model.operations.keys()): -# assert(key in group.labels), "Gates labels are not in `group`, so `compilation must be specified." -# for label in group.labels: -# assert(label in list(model.operations.keys()) -# ), "Some group elements not in `model`, so `compilation must be specified." - -# i_max = _np.floor((m_max - m_min) / m_step).astype('int') -# m = _np.zeros(1 + i_max, _np.int64) -# P_m = _np.zeros(1 + i_max, float) -# group_dim = len(group) -# R = R_matrix(model, group, group_to_model=group_to_model, weights=weights) -# success_prepLabel = list(model.preps.keys())[0] # just take first prep -# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \ -# else success_outcomelabel -# extended_E = _np.kron(_mtls.column_basis_vector(0, group_dim).T, model.povms['Mdefault'][success_effectLabel].T) -# extended_rho = _np.kron(_mtls.column_basis_vector(0, group_dim), model.preps[success_prepLabel]) - -# if compilation is None: -# extended_E = group_dim * _np.dot(extended_E, R) -# if group_twirled is True: -# extended_rho = _np.dot(R, extended_rho) -# else: -# full_model = _cnst.create_explicit_alias_model(model, compilation) -# R_fullgroup = R_matrix(full_model, group) -# extended_E = group_dim * _np.dot(extended_E, R_fullgroup) -# if group_twirled is True: -# extended_rho = _np.dot(R_fullgroup, extended_rho) - -# Rstep = _np.linalg.matrix_power(R, m_step) -# Riterate = _np.linalg.matrix_power(R, m_min) -# for i in range(0, 1 + i_max): -# m[i] = m_min + i * m_step -# P_m[i] = _np.dot(extended_E, _np.dot(Riterate, extended_rho)) -# Riterate = _np.dot(Rstep, Riterate) - -# return m, P_m - -### COMMENTED OUT SO THAT THIS FILE DOESN'T NEED "from .. import construction as _cnst" -### THIS SHOULD BE ADDED BACK IN AT SOME POINT. 
-# def L_matrix_asps(model, target_model, m_max, m_min=0, m_step=1, success_outcomelabel=('0',), # noqa N802 -# compilation=None, group_twirled=False, weights=None, gauge_optimize=True, -# return_error_bounds=False, norm='diamond'): -# """ -# Computes RB average survival probablities, as predicted by the 'L-matrix' theory. - -# This theory was introduced in Proctor et al Phys. Rev. Lett. 119, 130502 -# (2017). Within the function, the model is gauge-optimized to target_model. This is -# *not* optimized to the gauge specified by Proctor et al, but instead performs the -# standard pyGSTi gauge-optimization (using the frobenius distance). In most cases, -# this is likely to be a reasonable proxy for the gauge optimization perscribed by -# Proctor et al. - -# Parameters -# ---------- -# model : Model -# The noisy model. - -# target_model : Model -# The target model. - -# m_max : int -# The maximal sequence length of the random gates, not including the inversion gate. - -# m_min : int, optional -# The minimal sequence length. Defaults to the smallest valid value of 0. - -# m_step : int, optional -# The step size between sequence lengths. - -# success_outcomelabel : str or tuple, optional -# The outcome label associated with success. - -# compilation : dict, optional -# If `model` is not the full group, then a compilation for the group elements, -# used to implement the inversion gate (and the initial random group element, -# if `group_twirled` is True). This is a dictionary with the group labels as -# keys and a gate sequence of the elements of `model` as values. - -# group_twirled : bool, optional -# If True, the random sequence starts with a single uniformly random group -# element before the m random elements of `model`. - -# weights : dict, optional -# If not None, a dictionary of floats, whereby the keys are the gates in model -# and the values are the unnormalized probabilities to apply each gate at -# for each layer of the RB protocol. If None, the weighting defaults to an -# equal weighting on all gates, as used in most RB protocols (e.g., Clifford -# RB). - -# gauge_optimize : bool, optional -# If True a gauge-optimization to the target model is implemented before -# calculating all quantities. If False, no gauge optimization is performed. -# Whether or not a gauge optimization is performed does not affect the rate of -# decay but it will generally affect the exact form of the decay. E.g., if a -# perfect model is given to the function -- but in the "wrong" gauge -- no -# decay will be observed in the output P_m, but the P_m can be far from 1 (even -# for perfect SPAM) for all m. The gauge optimization is optional, as it is -# not guaranteed to always improve the accuracy of the reported P_m, although when -# gauge optimization is performed this limits the possible deviations of the -# reported P_m from the true P_m. - -# return_error_bounds : bool, optional -# Sets whether or not to return error bounds for how far the true ASPs can deviate -# from the values returned by this function. - -# norm : str, optional -# The norm used in the error bound calculation. Either 'diamond' for the diamond -# norm (the default) or '1to1' for the Hermitian 1 to 1 norm. - -# Returns -# ------- -# m : float -# Array of sequence length values that the ASPs have been calculated for. -# P_m : float -# Array containing predicted ASP values for the specified sequence length values. 
-# if error_bounds is True : -# lower_bound: float -# Array containing lower bounds on the possible ASP values - -# upper_bound: float -# Array containing upper bounds on the possible ASP values -# """ -# d = int(round(_np.sqrt(model.dim))) - -# if gauge_optimize: -# model_go = _algs.gaugeopt_to_target(model, target_model) -# else: -# model_go = model.copy() -# L = L_matrix(model_go, target_model, weights=weights) -# success_prepLabel = list(model.preps.keys())[0] # just take first prep -# success_effectLabel = success_outcomelabel[-1] if isinstance(success_outcomelabel, tuple) \ -# else success_outcomelabel -# identity_vec = _mtls.vec(_np.identity(d**2, float)) - -# if compilation is not None: -# model_group = _cnst.create_explicit_alias_model(model_go, compilation) -# model_target_group = _cnst.create_explicit_alias_model(target_model, compilation) -# delta = gate_dependence_of_errormaps(model_group, model_target_group, norm=norm) -# emaps = errormaps(model_group, model_target_group) -# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg']) - -# if group_twirled is True: -# L_group = L_matrix(model_group, model_target_group) - -# if compilation is None: -# delta = gate_dependence_of_errormaps(model_go, target_model, norm=norm) -# emaps = errormaps(model_go, target_model) -# E_eff = _np.dot(model_go.povms['Mdefault'][success_effectLabel].T, emaps.operations['Gavg']) - -# i_max = _np.floor((m_max - m_min) / m_step).astype('int') -# m = _np.zeros(1 + i_max, _np.int64) -# P_m = _np.zeros(1 + i_max, float) -# upper_bound = _np.zeros(1 + i_max, float) -# lower_bound = _np.zeros(1 + i_max, float) - -# Lstep = _np.linalg.matrix_power(L, m_step) -# Literate = _np.linalg.matrix_power(L, m_min) -# for i in range(0, 1 + i_max): -# m[i] = m_min + i * m_step -# if group_twirled: -# L_m_rdd = _mtls.unvec(_np.dot(L_group, _np.dot(Literate, identity_vec))) -# else: -# L_m_rdd = _mtls.unvec(_np.dot(Literate, identity_vec)) -# P_m[i] = _np.dot(E_eff, _np.dot(L_m_rdd, model_go.preps[success_prepLabel])) -# Literate = _np.dot(Lstep, Literate) -# upper_bound[i] = P_m[i] + delta / 2 -# lower_bound[i] = P_m[i] - delta / 2 -# if upper_bound[i] > 1: -# upper_bound[i] = 1. -# if lower_bound[i] < 0: -# lower_bound[i] = 0. -# if return_error_bounds: -# return m, P_m, lower_bound, upper_bound -# else: -# return m, P_m - def errormaps(model, target_model): """ @@ -798,72 +549,3 @@ def gate_dependence_of_errormaps(model, target_model, norm='diamond', mx_basis=N delta_avg = _np.mean(delta) return delta_avg - -# Future : perhaps put these back in. 
-#def Magesan_theory_predicted_decay(model, target_model, mlist, success_outcomelabel=('0',), -# norm='1to1', order='zeroth', return_all = False): -# -# assert(order == 'zeroth' or order == 'first') -# -# d = int(round(_np.sqrt(model.dim))) -# MTPs = {} -# MTPs['r'] = gateset_infidelity(model,target_model,itype='AGI') -# MTPs['p'] = _analysis.r_to_p(MTPs['r'],d,rtype='AGI') -# MTPs['delta'] = gate_dependence_of_errormaps(model, target_model, norm) -# error_gs = errormaps(model, target_model) -# -# R_list = [] -# Q_list = [] -# for gate in list(target_model.operations.keys()): -# R_list.append(_np.dot(_np.dot(error_gs.operations[gate],target_model.operations[gate]), -# _np.dot(error_gs.operations['Gavg'],_np.transpose(target_model.operations[gate])))) -# Q_list.append(_np.dot(target_model.operations[gate], -# _np.dot(error_gs.operations[gate],_np.transpose(target_model.operations[gate])))) -# -# error_gs.operations['GR'] = _np.mean(_np.array([ i for i in R_list]),axis=0) -# error_gs.operations['GQ'] = _np.mean(_np.array([ i for i in Q_list]),axis=0) -# error_gs.operations['GQ2'] = _np.dot(error_gs.operations['GQ'],error_gs.operations['Gavg']) -# error_gs.preps['rhoc_mixed'] = 1./d*_cnst.create_identity_vec(error_gs.basis)# -# -# #Assumes standard POVM labels -# povm = _objs.UnconstrainedPOVM( [('0_cm', target_model.povms['Mdefault']['0']), -# ('1_cm', target_model.povms['Mdefault']['1'])] ) -# ave_error_gsl = _cnst.to_circuits([('rho0','Gavg'),('rho0','GR'),('rho0','Gavg','GQ')]) -# data = _cnst.simulate_data(error_gs, ave_error_gsl, num_samples=1, sample_error="none")# - -# pr_L_p = data[('rho0','Gavg')][success_outcomelabel] -# pr_L_I = data[('rho0','Gavg')][success_outcomelabel_cm] -# pr_R_p = data[('rho0','GR')][success_outcomelabel] -# pr_R_I = data[('rho0','GR')][success_outcomelabel_cm] -# pr_Q_p = data[('rho0','Gavg','GQ')][success_outcomelabel] -# p = MTPs['p'] -# B_1 = pr_R_I -# A_1 = (pr_Q_p/p) - pr_L_p + ((p -1)*pr_L_I/p) + ((pr_R_p - pr_R_I)/p) -# C_1 = pr_L_p - pr_L_I -# q = _tls.average_gate_infidelity(error_gs.operations['GQ2'],_np.identity(d**2,float)) -# q = _analysis.r_to_p(q,d,rtype='AGI') -# -# if order == 'zeroth': -# MTPs['A'] = pr_L_I -# MTPs['B'] = pr_L_p - pr_L_I -# if order == 'first': -# MTPs['A'] = B_1 -# MTPs['B'] = A_1 - C_1*(q - 1)/p**2 -# MTPs['C'] = C_1*(q- p**2)/p**2 -# -# if order == 'zeroth': -# Pm = MTPs['A'] + MTPs['B']*MTPs['p']**_np.array(mlist) -# if order == 'first': -# Pm = MTPs['A'] + (MTPs['B'] + _np.array(mlist)*MTPs['C'])*MTPs['p']**_np.array(mlist) -# -# sys_eb = (MTPs['delta'] + 1)**(_np.array(mlist)+1) - 1 -# if order == 'first': -# sys_eb = sys_eb - (_np.array(mlist)+1)*MTPs['delta'] -# -# upper = Pm + sys_eb -# upper[upper > 1]=1. -# -# lower = Pm - sys_eb -# lower[lower < 0]=0. 
-# -# return mlist, Pm, upper, lower, MTPs diff --git a/setup.py b/setup.py index bac69bb6e..ebd67f1ef 100644 --- a/setup.py +++ b/setup.py @@ -157,7 +157,6 @@ def setup_with_extensions(extensions=None): 'pygsti.evotypes.stabilizer_slow', 'pygsti.evotypes.chp', 'pygsti.extras', - 'pygsti.extras.rb', 'pygsti.extras.rpe', 'pygsti.extras.drift', 'pygsti.extras.ibmq', diff --git a/test/test_packages/extras/test_interpygate.py b/test/test_packages/extras/test_interpygate.py index ea8ccfc83..97e76e936 100644 --- a/test/test_packages/extras/test_interpygate.py +++ b/test/test_packages/extras/test_interpygate.py @@ -51,8 +51,7 @@ def advance(self, state, v, t): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator process = change_basis(_expm((H + L) * t), 'pp', 'col') - vec_state = _np.outer(state, state.conj()).ravel(order='F') - state = unvec_square(_np.dot(process, vec_state), 'F') + state = unvec_square(_np.dot(process, _np.outer(state, state.conj()).ravel(order='F')), 'F') return state def create_process_matrix(self, v, comm=None): @@ -103,8 +102,7 @@ def advance(self, state, v, times): L = dephasing * self.dephasing_generator + decoherence * self.decoherence_generator processes = [change_basis(_expm((H + L) * t), 'pp', 'col') for t in times] - vec_state = _np.outer(state, state.conj()).ravel(order='F') - states = [unvec_square(_np.dot(process, vec_state),'F') for process in processes] + states = [unvec_square(_np.dot(process, _np.outer(state, state.conj()).ravel(order='F')),'F') for process in processes] return states diff --git a/test/test_packages/iotest/test_codecs.py b/test/test_packages/iotest/test_codecs.py index b8d718a9f..f14d26f1e 100644 --- a/test/test_packages/iotest/test_codecs.py +++ b/test/test_packages/iotest/test_codecs.py @@ -327,27 +327,6 @@ def test_pickle_dataset_with_circuitlabels(self): self.assertEqual(c2.str, "([Gx:0Gy:1])^2") pygsti.circuits.Circuit.default_expand_subcircuits = True - #Debugging, because there was some weird python3 vs 2 json incompatibility with string labels - # - turned out to be that the unit test files needed to import unicode_literals from __future__ - #def test_labels(self): - # strLabel = pygsti.baseobjs.Label("Gi") - # #strLabel = ("Gi",) - # from pygsti.modelpacks.legacy import std1Q_XYI as std - # - # s = json.dumps(strLabel) - # print("s = ",str(s)) - # x = msgpack.loads(s) - # print("x = ",x) - # - # print("-----------------------------") - # - # s = json.dumps(std.prepStrs[2]) - # print("s = ",s) - # x = json.loads(s) - # print("x = ",x) - # assert(False),"STOP" - - if __name__ == "__main__": unittest.main(verbosity=2) diff --git a/test/unit/modelmembers/test_operation.py b/test/unit/modelmembers/test_operation.py index 720c3ee91..d3b44deef 100644 --- a/test/unit/modelmembers/test_operation.py +++ b/test/unit/modelmembers/test_operation.py @@ -745,12 +745,6 @@ def build_gate(): mx = np.identity(state_space.dim, 'd') return op.EmbeddedOp(state_space, ['Q0'], op.FullArbitraryOp(mx, evotype=evotype, state_space=None)) - #This is really a state-space unit test - #def test_constructor_raises_on_bad_state_space_label(self): - # mx = np.identity(4, 'd') - # with self.assertRaises(ValueError): - # op.EmbeddedOp([('L0', 'foobar')], ['Q0'], op.FullArbitraryOp(mx)) - def test_constructor_raises_on_state_space_label_mismatch(self): mx = np.identity(4, 'd') state_space = statespace.StateSpace.cast([('Q0',), ('Q1',)]) diff --git a/test/unit/objects/test_evaltree.py b/test/unit/objects/test_evaltree.py deleted file mode 
100644 index f0e73b376..000000000 --- a/test/unit/objects/test_evaltree.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np - -from ..util import BaseCase - - -#TODO: create an evaltree and use this function to check it -- this was -# taken from an internal checking function within evaltree.py -#def check_tree(evaltree, original_list): #generate_circuit_list(self, permute=True): -# """ -# Generate a list of the final operation sequences this tree evaluates. -# -# This method essentially "runs" the tree and follows its -# prescription for sequentailly building up longer strings -# from shorter ones. When permute == True, the resulting list -# should be the same as the one passed to initialize(...), and -# so this method may be used as a consistency check. -# -# Parameters -# ---------- -# permute : bool, optional -# Whether to permute the returned list of strings into the -# same order as the original list passed to initialize(...). -# When False, the computed order of the operation sequences is -# given, which is matches the order of the results from calls -# to `Model` bulk operations. Non-trivial permutation -# occurs only when the tree is split (in order to keep -# each sub-tree result a contiguous slice within the parent -# result). -# -# Returns -# ------- -# list of gate-label-tuples -# A list of the operation sequences evaluated by this tree, each -# specified as a tuple of operation labels. -# """ -# circuits = [None] * len(self) -# -# #Set "initial" (single- or zero- gate) strings -# for i, opLabel in zip(self.get_init_indices(), self.get_init_labels()): -# if opLabel == "": circuits[i] = () # special case of empty label -# else: circuits[i] = (opLabel,) -# -# #Build rest of strings -# for i in self.get_evaluation_order(): -# iLeft, iRight = self[i] -# circuits[i] = circuits[iLeft] + circuits[iRight] -# -# #Permute to get final list: -# nFinal = self.num_final_strings() -# if self.original_index_lookup is not None and permute: -# finalCircuits = [None] * nFinal -# for iorig, icur in self.original_index_lookup.items(): -# if iorig < nFinal: finalCircuits[iorig] = circuits[icur] -# assert(None not in finalCircuits) -# return finalCircuits -# else: -# assert(None not in circuits[0:nFinal]) -# return circuits[0:nFinal] diff --git a/test/unit/objects/test_model.py b/test/unit/objects/test_model.py index fdad0a22c..b9f50c8e0 100644 --- a/test/unit/objects/test_model.py +++ b/test/unit/objects/test_model.py @@ -377,16 +377,6 @@ def test_hproduct(self): hp_flat = self.model.sim.hproduct(circuit, flat=True) # TODO assert correctness for all of the above - #REMOVED from fwdsim (unused) - #def test_bulk_hproduct(self): - # gatestring1 = ('Gx', 'Gy') - # gatestring2 = ('Gx', 'Gy', 'Gy') - # circuits = [gatestring1, gatestring2] - # hp = self.model.sim.bulk_hproduct(circuits) - # hp_flat = self.model.sim.bulk_hproduct(circuits, flat=True) - # hp_scaled, scaleVals = self.model.sim.bulk_hproduct(circuits, scale=True) - # # TODO assert correctness for all of the above - class SimMethodBase(object): """Tests for model methods which can use different forward sims""" diff --git a/test/unit/objects/test_polynomial.py b/test/unit/objects/test_polynomial.py index 0b4c3219f..404d0302b 100644 --- a/test/unit/objects/test_polynomial.py +++ b/test/unit/objects/test_polynomial.py @@ -1,10 +1,578 @@ import numpy as np +import platform as _platform +assert(_platform.architecture()[0].endswith("bit")) # e.g. 
"64bit" +PLATFORM_BITS = int(_platform.architecture()[0].strip("bit")) + from pygsti.baseobjs.opcalc import compact_deriv from pygsti.baseobjs import polynomial as poly +from pygsti.evotypes.basereps import PolynomialRep as _PolynomialRep from ..util import BaseCase +# TODO: use this class to faciliate unit tests. Right now it's just +# sitting here. +class SLOWPolynomial(dict): + """ + This reference class provides an alternative implementation of poly.Polynomial. + + Parameters + ---------- + coeffs : dict + A dictionary of coefficients. Keys are tuples of integers that + specify the polynomial term the coefficient value multiplies + (see above). If None, the zero polynomial (no terms) is created. + + max_num_vars : int + The maximum number of independent variables this polynomial can + hold. Placing a limit on the number of variables allows more + compact storage and efficient evaluation of the polynomial. + """ + + @classmethod + def _vindices_per_int(cls, max_num_vars): + """ + The number of variable indices that fit into a single int when there are at most `max_num_vars` variables. + + This quantity is needed to directly construct Polynomial representations + and is thus useful internally for forward simulators. + + Parameters + ---------- + max_num_vars : int + The maximum number of independent variables. + + Returns + ------- + int + """ + return int(np.floor(PLATFORM_BITS / np.log2(max_num_vars + 1))) + + @classmethod + def from_rep(cls, rep): + """ + Creates a Polynomial from a "representation" (essentially a lite-version) of a Polynomial. + + Note: usually we only need to convert from full-featured Python objects + to the lighter-weight "representation" objects. Polynomials are an + exception, since as the results of probability computations they need + to be converted back from "representation-form" to "full-form". + + Parameters + ---------- + rep : PolynomialRep + A polynomial representation. + + Returns + ------- + Polynomial + """ + max_num_vars = rep.max_num_vars # one of the few/only cases where a rep + # max_order = rep.max_order # needs to expose some python properties + + def int_to_vinds(indx_tup): + ret = [] + for indx in indx_tup: + while indx != 0: + nxt = indx // (max_num_vars + 1) + i = indx - nxt * (max_num_vars + 1) + ret.append(i - 1) + indx = nxt + #assert(len(ret) <= max_order) #TODO: is this needed anymore? + return tuple(sorted(ret)) + + tup_coeff_dict = {int_to_vinds(k): val for k, val in rep.coeffs.items()} + ret = cls(tup_coeff_dict) + ret.fastpoly = poly.Polynomial.from_rep(rep) + ret._check_fast_polynomial() + return ret + + def __init__(self, coeffs=None, max_num_vars=100): + """ + Initializes a new Polynomial object (a subclass of dict). + + Internally (as a dict) a Polynomial represents variables by integer + indices, e.g. "2" means "x_2". Keys are tuples of variable indices and + values are numerical coefficients (floating point or complex numbers). + A variable to a power > 1 has its index repeated in the key-tuple. + + E.g. x_0^2 + 3*x_1 + 4 is stored as `{(0,0): 1.0, (1,): 3.0, (): 4.0}` + + Parameters + ---------- + coeffs : dict + A dictionary of coefficients. Keys are tuples of integers that + specify the polynomial term the coefficient value multiplies + (see above). If None, the zero polynomial (no terms) is created. + + max_num_vars : int + The maximum number of independent variables this polynomial can + hold. Placing a limit on the number of variables allows more + compact storage and efficient evaluation of the polynomial. 
+ """ + super(SLOWPolynomial, self).__init__() + if coeffs is not None: + self.update(coeffs) + self.max_num_vars = max_num_vars + self.fastpoly = poly.Polynomial(coeffs, max_num_vars) + self._check_fast_polynomial() + + def _check_fast_polynomial(self, raise_err=True): + """ + Check that included poly.Polynomial has remained in-sync with this one. + + This is purely for debugging, to ensure that the poly.Polynomial + class implements its operations correctly. + + Parameters + ---------- + raise_err : bool, optional + Whether to raise an AssertionError if the check fails. + + Returns + ------- + bool + Whether or not the check has succeeded (True if the + fast and slow implementations are in sync). + """ + if set(self.fastpoly.coeffs.keys()) != set(self.keys()): + print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) + if raise_err: assert(False), "STOP" + return False + for k in self.fastpoly.coeffs.keys(): + if not np.isclose(self.fastpoly.coeffs[k], self[k]): + print("FAST", self.fastpoly.coeffs, " != SLOW", dict(self)) + if raise_err: assert(False), "STOP" + return False + if self.max_num_vars != self.fastpoly.max_num_vars: + print("#Var mismatch: FAST", self.fastpoly.max_num_vars, " != SLOW", self.max_num_vars) + if raise_err: assert(False), "STOP" + return False + + return True + + def deriv(self, wrt_param): + """ + Take the derivative of this Polynomial with respect to a single variable. + + The result is another Polynomial. + + E.g. deriv(x_2^3 + 3*x_1, wrt_param=2) = 3x^2 + + Parameters + ---------- + wrt_param : int + The variable index to differentiate with respect to. + E.g. "4" means "differentiate w.r.t. x_4". + + Returns + ------- + Polynomial + """ + dcoeffs = {} + for ivar, coeff in self.items(): + cnt = float(ivar.count(wrt_param)) + if cnt > 0: + l = list(ivar) + del l[l.index(wrt_param)] + dcoeffs[tuple(l)] = cnt * coeff + + ret = SLOWPolynomial(dcoeffs, self.max_num_vars) + ret.fastpoly = self.fastpoly.deriv(wrt_param) + ret._check_fast_polynomial() + return ret + + def degree(self): + """ + The largest sum-of-exponents for any term (monomial) within this polynomial. + + E.g. for x_2^3 + x_1^2*x_0^2 has degree 4. + + Returns + ------- + int + """ + ret = max((len(k) for k in self), default=0) + assert(self.fastpoly.degree == ret) + self._check_fast_polynomial() + return ret + + def evaluate(self, variable_values): + """ + Evaluate this polynomial for a given set of variable values. + + Parameters + ---------- + variable_values : array-like + An object that can be indexed so that `variable_values[i]` gives the + numerical value for i-th variable (x_i). + + Returns + ------- + float or complex + Depending on the types of the coefficients and `variable_values`. + """ + #FUTURE: make this function smarter (Russian peasant) + ret = 0 + for ivar, coeff in self.items(): + ret += coeff * np.prod([variable_values[i] for i in ivar]) + assert(np.isclose(ret, self.fastpoly.evaluate(variable_values))) + self._check_fast_polynomial() + return ret + + def compact(self, complex_coeff_tape=True): + """ + Generate a compact form of this polynomial designed for fast evaluation. + + The resulting "tapes" can be evaluated using + :func:`opcalc.bulk_eval_compact_polynomials`. + + Parameters + ---------- + complex_coeff_tape : bool, optional + Whether the `ctape` returned array is forced to be of complex type. + If False, the real part of all coefficients is taken (even if they're + complex). 
+ + Returns + ------- + vtape, ctape : numpy.ndarray + These two 1D arrays specify an efficient means for evaluating this + polynomial. + """ + #if force_complex: + # iscomplex = True + #else: + # iscomplex = any([abs(np.imag(x)) > 1e-12 for x in self.values()]) + iscomplex = complex_coeff_tape + + nTerms = len(self) + nVarIndices = sum(map(len, self.keys())) + vtape = np.empty(1 + nTerms + nVarIndices, np.int64) # "variable" tape + ctape = np.empty(nTerms, complex if iscomplex else 'd') # "coefficient tape" + + i = 0 + vtape[i] = nTerms; i += 1 + for iTerm, k in enumerate(sorted(self.keys())): + l = len(k) + ctape[iTerm] = self[k] if iscomplex else np.real(self[k]) + vtape[i] = l; i += 1 + vtape[i:i + l] = k; i += l + assert(i == len(vtape)), "Logic Error!" + fast_vtape, fast_ctape = self.fastpoly.compact(iscomplex) + assert(np.allclose(fast_vtape, vtape) and np.allclose(fast_ctape, ctape)) + self._check_fast_polynomial() + return vtape, ctape + + def copy(self): + """ + Returns a copy of this polynomial. + + Returns + ------- + Polynomial + """ + fast_cpy = self.fastpoly.copy() + ret = SLOWPolynomial(self, self.max_num_vars) + ret.fastpoly = fast_cpy + ret._check_fast_polynomial() + return ret + + def map_indices(self, mapfn): + """ + Performs a bulk find & replace on this polynomial's variable indices. + + This is useful when the variable indices have external significance + (like being the indices of a gate's parameters) and one want to convert + to another set of indices (like a parent model's parameters). + + Parameters + ---------- + mapfn : function + A function that takes as input an "old" variable-index-tuple + (a key of this Polynomial) and returns the updated "new" + variable-index-tuple. + + Returns + ------- + Polynomial + """ + ret = SLOWPolynomial({mapfn(k): v for k, v in self.items()}, self.max_num_vars) + ret.fastpoly = self.fastpoly.map_indices(mapfn) + self._check_fast_polynomial() + ret._check_fast_polynomial() + return ret + + def map_indices_inplace(self, mapfn): + """ + Performs an in-place find & replace on this polynomial's variable indices. + + This is useful when the variable indices have external significance + (like being the indices of a gate's parameters) and one want to convert + to another set of indices (like a parent model's parameters). + + Parameters + ---------- + mapfn : function + A function that takes as input an "old" variable-index-tuple + (a key of this Polynomial) and returns the updated "new" + variable-index-tuple. + + Returns + ------- + None + """ + self._check_fast_polynomial() + new_items = {mapfn(k): v for k, v in self.items()} + self.clear() + self.update(new_items) + self.fastpoly.map_indices_inplace(mapfn) + self._check_fast_polynomial() + + def mult(self, x): + """ + Multiplies this polynomial by another polynomial `x`. + + Parameters + ---------- + x : Polynomial + The polynomial to multiply by. + + Returns + ------- + Polynomial + The polynomial representing self * x. + """ + newpoly = SLOWPolynomial({}, self.max_num_vars) + for k1, v1 in self.items(): + for k2, v2 in x.items(): + k = tuple(sorted(k1 + k2)) + if k in newpoly: newpoly[k] += v1 * v2 + else: newpoly[k] = v1 * v2 + + newpoly.fastpoly = self.fastpoly.mult(x.fastpoly) + self._check_fast_polynomial() + newpoly._check_fast_polynomial() + return newpoly + + def scale(self, x): + """ + Scale this polynomial by `x` (multiply all coefficients by `x`). + + Parameters + ---------- + x : float or complex + The value to scale by. 
+ + Returns + ------- + None + """ + # assume a scalar that can multiply values + for k in tuple(self.keys()): # I think the tuple() might speed things up (why?) + self[k] *= x + self.fastpoly.scale(x) + self._check_fast_polynomial() + + def scalar_mult(self, x): + """ + Multiplies this polynomial by a scalar `x`. + + Parameters + ---------- + x : float or complex + The value to multiply by. + + Returns + ------- + Polynomial + """ + newpoly = self.copy() + newpoly.scale(x) + self._check_fast_polynomial() + newpoly._check_fast_polynomial() + return newpoly + + def __str__(self): + def fmt(x): + if abs(np.imag(x)) > 1e-6: + if abs(np.real(x)) > 1e-6: return "(%.3f+%.3fj)" % (x.real, x.imag) + else: return "(%.3fj)" % x.imag + else: return "%.3f" % x.real + + termstrs = [] + sorted_keys = sorted(list(self.keys())) + for k in sorted_keys: + varstr = ""; last_i = None; n = 1 + for i in sorted(k): + if i == last_i: n += 1 + elif last_i is not None: + varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") + n = 1 + last_i = i + if last_i is not None: + varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "") + #print("DB: k = ",k, " varstr = ",varstr) + if abs(self[k]) > 1e-4: + termstrs.append("%s%s" % (fmt(self[k]), varstr)) + + self._check_fast_polynomial() + if len(termstrs) > 0: + return " + ".join(termstrs) + else: return "0" + + def __repr__(self): + return "Poly[ " + str(self) + " ]" + + def __add__(self, x): + newpoly = self.copy() + if isinstance(x, SLOWPolynomial): + for k, v in x.items(): + if k in newpoly: newpoly[k] += v + else: newpoly[k] = v + newpoly.fastpoly = self.fastpoly + x.fastpoly + else: # assume a scalar that can be added to values + for k in newpoly: + newpoly[k] += x + newpoly.fastpoly = self.fastpoly + x + self._check_fast_polynomial() + newpoly._check_fast_polynomial() + return newpoly + + def __iadd__(self, x): + """ Does self += x more efficiently """ + if isinstance(x, SLOWPolynomial): + for k, v in x.items(): + try: + self[k] += v + except KeyError: + self[k] = v + self.fastpoly += x.fastpoly + else: # assume a scalar that can be added to values + for k in self: + self[k] += x + self.fastpoly += x + self._check_fast_polynomial() + return self + + def __mul__(self, x): + #if isinstance(x, SLOWPolynomial): + # newpoly = SLOWPolynomial() + # for k1,v1 in self.items(): + # for k2,v2 in x.items(): + # k = tuple(sorted(k1+k2)) + # if k in newpoly: newpoly[k] += v1*v2 + # else: newpoly[k] = v1*v2 + #else: + # # assume a scalar that can multiply values + # newpoly = self.copy() + # for k in newpoly: + # newpoly[k] *= x + #return newpoly + if isinstance(x, SLOWPolynomial): + ret = self.mult(x) + else: # assume a scalar that can multiply values + ret = self.scalar_mult(x) + self._check_fast_polynomial() + ret._check_fast_polynomial() + return ret + + def __rmul__(self, x): + return self.__mul__(x) + + def __imul__(self, x): + self._check_fast_polynomial() + if isinstance(x, SLOWPolynomial): + x._check_fast_polynomial() + newcoeffs = {} + for k1, v1 in self.items(): + for k2, v2 in x.items(): + k = tuple(sorted(k1 + k2)) + if k in newcoeffs: newcoeffs[k] += v1 * v2 + else: newcoeffs[k] = v1 * v2 + self.clear() + self.update(newcoeffs) + self.fastpoly *= x.fastpoly + self._check_fast_polynomial() + else: + self.scale(x) + self._check_fast_polynomial() + return self + + def __pow__(self, n): + ret = SLOWPolynomial({(): 1.0}, self.max_num_vars) # max_order updated by mults below + cur = self + for i in range(int(np.floor(np.log2(n))) + 1): + rem = n % 2 # gets least 
significant bit (i-th) of n + if rem == 1: ret *= cur # add current power of x (2^i) if needed + cur = cur * cur # current power *= 2 + n //= 2 # shift bits of n right + ret.fastpoly = self.fastpoly ** n + ret._check_fast_polynomial() + self._check_fast_polynomial() + return ret + + def __copy__(self): + ret = self.copy() + ret._check_fast_polynomial() + self._check_fast_polynomial() + return ret + + def to_rep(self): + """ + Construct a representation of this polynomial. + + "Representations" are lightweight versions of objects used to improve + the efficiency of intensely computational tasks. Note that Polynomial + representations must have the same `max_order` and `max_num_vars` in + order to interact with each other (add, multiply, etc.). + + Parameters + ---------- + max_num_vars : int, optional + The maximum number of variables the represenatation is allowed to + have (x_0 to x_(`max_num_vars-1`)). This sets the maximum allowed + variable index within the representation. + + Returns + ------- + PolynomialRep + """ + # Set max_num_vars (determines based on coeffs if necessary) + max_num_vars = self.max_num_vars + default_max_vars = 0 if len(self) == 0 else \ + max([(max(k) + 1 if k else 0) for k in self.keys()]) + if max_num_vars is None: + max_num_vars = default_max_vars + else: + assert(default_max_vars <= max_num_vars) + + vindices_per_int = SLOWPolynomial._vindices_per_int(max_num_vars) + + def vinds_to_int(vinds): + """ Convert tuple index of ints to single int given max_numvars """ + ints_in_key = int(np.ceil(len(vinds) / vindices_per_int)) + ret_tup = [] + for k in range(ints_in_key): + ret = 0; m = 1 + for i in vinds[k * vindices_per_int:(k + 1) * vindices_per_int]: # last tuple index=most significant + assert(i < max_num_vars), "Variable index exceed maximum!" + ret += (i + 1) * m + m *= max_num_vars + 1 + assert(ret >= 0), "vinds = %s -> %d!!" % (str(vinds), ret) + ret_tup.append(ret) + return tuple(ret_tup) + + int_coeffs = {vinds_to_int(k): v for k, v in self.items()} + + # (max_num_vars+1) ** vindices_per_int <= 2**PLATFORM_BITS, so: + # vindices_per_int * log2(max_num_vars+1) <= PLATFORM_BITS + vindices_per_int = int(np.floor(PLATFORM_BITS / np.log2(max_num_vars + 1))) + self._check_fast_polynomial() + + return _PolynomialRep(int_coeffs, max_num_vars, vindices_per_int) + + class CompactPolynomialTester(BaseCase): def test_compact_polys(self): # TODO break apart diff --git a/test/unit/tools/test_likelihoodfns.py b/test/unit/tools/test_likelihoodfns.py index 6b0eafc41..31327dc01 100644 --- a/test/unit/tools/test_likelihoodfns.py +++ b/test/unit/tools/test_likelihoodfns.py @@ -68,11 +68,6 @@ def test_logl_max(self): maxL2 = lfn.logl_max(self.model, self.ds, self.circuits, poisson_picture=False) # TODO assert correctness - #Removed this function - #def test_cptp_penalty(self): - # lfn.cptp_penalty(self.model, include_spam_penalty=True) - # # TODO assert correctness - def test_two_delta_logl(self): twoDelta1 = lfn.two_delta_logl_term(n=100, p=0.5, f=0.6, min_prob_clip=1e-6, poisson_picture=True) twoDelta2 = lfn.two_delta_logl_term(n=100, p=0.5, f=0.6, min_prob_clip=1e-6, poisson_picture=False)
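
A minimal sketch (not part of the diff above) of the column-stacking convention that the test_interpygate.py hunks rely on: the process matrix multiplies vec(rho) = rho.ravel(order='F'), and the result is reshaped back into a square matrix. Plain numpy reshape stands in for pyGSTi's unvec_square helper here; the identity process is used purely for illustration.

import numpy as np

# Column-stacking ('col') convention used in the advance() methods above:
# a process matrix acts on the column-stacked density matrix.
d = 2
rng = np.random.default_rng(0)
psi = rng.normal(size=d) + 1j * rng.normal(size=d)
rho = np.outer(psi, psi.conj())                  # |psi><psi|, as built in advance()

process = np.eye(d * d)                          # trivial identity process for the sketch
vec_rho = rho.ravel(order='F')                   # vec(rho): stack columns
evolved = (process @ vec_rho).reshape((d, d), order='F')   # "unvec" back to d x d

assert np.allclose(evolved, rho)                 # identity process leaves rho unchanged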
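
A short usage sketch for the SLOWPolynomial reference class added to test_polynomial.py above, showing how it could cross-check poly.Polynomial as the in-code TODO suggests. It assumes SLOWPolynomial (defined above) is in scope in the same test module; the coefficient dictionary follows the x_0^2 + 3*x_1 + 4 example from the class docstring.

import numpy as np

# Every SLOWPolynomial operation also updates and verifies its embedded
# poly.Polynomial (self.fastpoly), so exercising the slow class exercises the fast one.
p = SLOWPolynomial({(0, 0): 1.0, (1,): 3.0, (): 4.0}, max_num_vars=100)   # x_0^2 + 3*x_1 + 4

vals = np.array([2.0, 5.0])                 # x_0 = 2, x_1 = 5
assert np.isclose(p.evaluate(vals), 23.0)   # 4 + 15 + 4

dp = p.deriv(0)                             # d/dx_0 -> 2*x_0
assert np.isclose(dp.evaluate(vals), 4.0)
assert p.degree() == 2
assert p._check_fast_polynomial()           # slow and fast coefficients still agree

vtape, ctape = p.compact()                  # tapes for bulk evaluation (see compact())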
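
A worked example (illustrative only) of the integer packing implemented by to_rep() and _vindices_per_int() above: variable-index tuples are written in base (max_num_vars + 1) with each index offset by one, and floor(PLATFORM_BITS / log2(max_num_vars + 1)) indices fit in a single integer. The helper names below are local to this sketch.

import numpy as np
import platform

PLATFORM_BITS = int(platform.architecture()[0].strip("bit"))   # e.g. 64
max_num_vars = 100

# How many variable indices fit in one int (9 on a 64-bit platform):
vindices_per_int = int(np.floor(PLATFORM_BITS / np.log2(max_num_vars + 1)))

def vinds_to_int(vinds):
    # Same base-(max_num_vars + 1) encoding as to_rep(), for tuples short
    # enough to fit in a single int (len(vinds) <= vindices_per_int).
    ret, m = 0, 1
    for i in vinds:
        ret += (i + 1) * m
        m *= max_num_vars + 1
    return ret

def int_to_vinds(indx):
    # Inverse decoding, as in from_rep().
    ret = []
    while indx != 0:
        indx, i = divmod(indx, max_num_vars + 1)
        ret.append(i - 1)
    return tuple(sorted(ret))

packed = vinds_to_int((0, 2))          # (0+1)*1 + (2+1)*101 = 304
assert packed == 304
assert int_to_vinds(packed) == (0, 2)  # round-trips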