From c260399d37542ade576cf2c9ed8a310fa3c1fb1a Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 07:56:10 -0800 Subject: [PATCH 01/10] documentation for DenseOperator --- pygsti/modelmembers/operations/denseop.py | 65 +++++++++-------------- 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/pygsti/modelmembers/operations/denseop.py b/pygsti/modelmembers/operations/denseop.py index e5f600182..cce6e0088 100644 --- a/pygsti/modelmembers/operations/denseop.py +++ b/pygsti/modelmembers/operations/denseop.py @@ -255,7 +255,6 @@ def __complex__(self): return complex(self._ptr) class DenseOperator(DenseOperatorInterface, _KrausOperatorInterface, _LinearOperator): """ - TODO: update docstring An operator that behaves like a dense super-operator matrix. This class is the common base class for more specific dense operators. @@ -278,10 +277,29 @@ class DenseOperator(DenseOperatorInterface, _KrausOperatorInterface, _LinearOper The state space for this operation. If `None` a default state space with the appropriate number of qubits is used. - Attributes + Properties ---------- - base : numpy.ndarray - Direct access to the underlying process matrix data. + DenseOperatorInterface is deprecated and this class' dependence on it + will be removed in the immediate future. For the time being, the + implementation of __getattr__ in DenseOperatorInterface means that + DenseOperator has every property of its attached OpRepDenseSuperop, + which is stored in DenseOperator._rep. + + Private attributes + ------------------ + _evotype (required by ModelMember; positional arg in DenseOperator.__init__) + _rep (required by LinearOperator; either Cython or Python "OpRepDenseSuperop") + _basis (indirect requirement of _rep) + _ptr (required by DenseOperatorInterface, implemented as self._rep.base) + _state_space (required by ModelMember; inferrable from None as __init__ kwarg) + + Other private attributes + ------------------------ + ModelMember._gpindices + ModelMember._paramlbls + ModelMember._param_bounds + ModelMember._dirty + ModelMember._submember_rpindices """ @classmethod @@ -393,44 +411,9 @@ def _is_similar(self, other, rtol, atol): @property def kraus_operators(self): """A list of this operation's Kraus operators as numpy arrays.""" - # Let I index a basis element, rho be a d x d matrix, and (I // d, I mod d) := (i,ii) be the "2D" - # index corresponding to I. - # op(rho) = sum_IJ choi_IJ BI rho BJ_dag = sum_IJ (evecs_IK D_KK evecs_inv_KJ) BI rho BJ_dag - # Note: evecs can be and are assumed chosen to be orthonormal, so evecs_inv = evecs^dag - # if {Bi} is the set of matrix units then ... 
- # = sum_IJK(i',j') (evecs_IK D_KK evecs_inv_KJ) unitI_ii' rho_i'j' unitJ_j'j - # = sum_IJK(i',j') (evecs_(Ia,Ib)K D_KK evecs_inv_K(Ja,Jb)) unitI_ii' rho_i'j' unitJ_j'j - # using fact that sum(i') unitI_ii' ==> i'=Ib and delta_i,Ia factor - # = sum_IJK (evecs_(Ia,Ib)K D_KK evecs_inv_K(Ja,Jb)) rho_IbJa * delta_i,Ia, delta_j,Jb - # = sum_K D_KK [ sum_(Ib,Ja) evector[K]_(i,Ib) rho_IbJa dual_evector[K]_(Ja,j) ] - # -> let reshaped K-th evector be called O_K and dual O_K^dag - # = sum_K D_KK O_K rho O_K^dag assert(self._basis is not None), "Kraus operator functionality requires specifying a superoperator basis" - superop_mx = self.to_dense("HilbertSchmidt"); d = int(_np.round(_np.sqrt(superop_mx.shape[0]))) - std_basis = _Basis.cast('std', superop_mx.shape[0]) - choi_mx = _jt.jamiolkowski_iso(superop_mx, self._basis, std_basis) * d # see NOTE below - # NOTE: multiply by `d` (density mx dimension) to un-normalize choi_mx as given by - # jamiolkowski_iso. Here we *want* the trace of choi_mx to be `d`, not 1, so that - # op(rho) = sum_IJ choi_IJ BI rho BJ_dag is true. - - #CHECK 1 (to unit test?) REMOVE - #tmp_std = _bt.change_basis(superop_mx, self._basis, 'std') - #B = _bt.basis_matrices('std', superop_mx.shape[0]) - #check_superop = sum([ choi_mx[i,j] * _np.kron(B[i], B[j].conjugate()) for i in range(d*d) for j in range(d*d)]) - #assert(_np.allclose(check_superop, tmp_std)) - - evals, evecs = _np.linalg.eigh(choi_mx) - assert(_np.allclose(evecs @ _np.diag(evals) @ (evecs.conjugate().T), choi_mx)) - TOL = 1e-7 # consider lowering this tolerance as it leads to errors of this order in the Kraus decomp - if any([ev <= -TOL for ev in evals]): - raise ValueError("Cannot compute Kraus decomposition of non-positive-definite superoperator!") - kraus_ops = [evecs[:, i].reshape(d, d) * _np.sqrt(ev) for i, ev in enumerate(evals) if abs(ev) > TOL] - - #CHECK 2 (to unit test?) 
REMOVE - #std_superop = sum([_ot.unitary_to_std_process_mx(kop) for kop in kraus_ops]) - #assert(_np.allclose(std_superop, tmp_std)) - - return kraus_ops + kops = self._kraus_operators() + return kops def set_kraus_operators(self, kraus_operators): """ From e9bc2a39578fcca8e9a6c3bd7eecbfc1c0717600 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 07:56:57 -0800 Subject: [PATCH 02/10] add the ability to specify custom germs in GSTModelPack.create_gst_experiment_design --- pygsti/modelpacks/_modelpack.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pygsti/modelpacks/_modelpack.py b/pygsti/modelpacks/_modelpack.py index 358b7bf96..36bb98709 100644 --- a/pygsti/modelpacks/_modelpack.py +++ b/pygsti/modelpacks/_modelpack.py @@ -360,7 +360,8 @@ def create_gst_experiment_design(self, max_max_length, qubit_labels=None, fpr=Fa """ for k in kwargs.keys(): if k not in ('germ_length_limits', 'keep_fraction', 'keep_seed', 'include_lgst', 'nest', 'circuit_rules', - 'op_label_aliases', 'dscheck', 'action_if_missing', 'verbosity', 'add_default_protocol'): + 'op_label_aliases', 'dscheck', 'action_if_missing', 'verbosity', 'add_default_protocol', + 'germs'): raise ValueError("Invalid argument '%s' to StandardGSTDesign constructor" % k) if qubit_labels is None: qubit_labels = self._sslbls @@ -380,11 +381,13 @@ def create_gst_experiment_design(self, max_max_length, qubit_labels=None, fpr=Fa else: max_lengths_list = list(_gen_max_length(max_max_length)) + germs = kwargs.get('germs', self.germs(qubit_labels, lite)) + return _gst.StandardGSTDesign( self.processor_spec(qubit_labels), self.prep_fiducials(qubit_labels), self.meas_fiducials(qubit_labels), - self.germs(qubit_labels, lite), + germs, max_lengths_list, kwargs.get('germ_length_limits', None), fidpairs, From c5a288ffe194f4331ac744a9b0b05480f55eca93 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 07:59:07 -0800 Subject: [PATCH 03/10] update matrixtools.eigenvalues and matrixtools.eigendecomposition to allow ndarrays where flags.writeable is False --- pygsti/tools/matrixtools.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pygsti/tools/matrixtools.py b/pygsti/tools/matrixtools.py index a23025347..d43ac70f8 100644 --- a/pygsti/tools/matrixtools.py +++ b/pygsti/tools/matrixtools.py @@ -813,6 +813,8 @@ def eigenvalues(m: _np.ndarray, *, assume_hermitian: Optional[bool] = None, assu if assume_hermitian: # Make sure it's Hermtian in exact arithmetic. This helps with # reproducibility across different implementations of LAPACK. + if not m.flags.writeable: + m = m.copy() m += m.T.conj() m /= 2 return _np.linalg.eigvalsh(m) @@ -830,6 +832,8 @@ def eigendecomposition(m: _np.ndarray, *, assume_hermitian: Optional[bool] = Non if assume_hermitian: # Make sure it's Hermtian in exact arithmetic. This helps with # reproducibility across different implementations of LAPACK. + if not m.flags.writeable: + m = m.copy() m += m.T.conj() m /= 2 evals, evecs = _np.linalg.eigh(m) From 9d8e89395ff6288a46655e643bc1a0f3f2666750 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 08:00:12 -0800 Subject: [PATCH 04/10] fix whitespace --- pygsti/models/model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index 562118a50..f3af1278a 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1107,7 +1107,6 @@ def _get_shift(j): return _bisect.bisect_left(indices_to_remove, j) #rebuild the model index to model member map if needed. 
self._build_index_mm_map()
-
     def _init_virtual_obj(self, obj):
         """
         Initializes a "virtual object" - an object (e.g. LinearOperator) that *could* be a
@@ -1232,8 +1231,6 @@ def set_parameter_value(self, index, val, close=False):
         """
         self.set_parameter_values([index], [val], close)
 
-
-
     def set_parameter_values(self, indices, values, close=False):
         """

From a501a15a9c207be3c066ed070a2e987781c3d3b4 Mon Sep 17 00:00:00 2001
From: Riley Murray
Date: Wed, 3 Dec 2025 08:03:45 -0800
Subject: [PATCH 05/10] optools.py: add minimal_kraus_decomposition function
 (important for this PR), rootconj_superop (important for this PR), plus
 partial_trace and trace_effect (nice to have but not strictly necessary for
 this PR).

---
 pygsti/tools/optools.py | 111 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)

diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py
index 487f0e661..b77c81c8c 100644
--- a/pygsti/tools/optools.py
+++ b/pygsti/tools/optools.py
@@ -575,6 +575,117 @@ def tensorized_with_eye(op: _np.ndarray, op_basis: _Basis, ten_basis: Optional[_
     return ten_op, ten_basis
 
 
+def rootconj_superop(effect_superket: _np.ndarray, basis: _Basis, abstol_warn: float=1e-10, abstol_error: float=1e-8) -> _np.ndarray:
+    """
+    Let E denote the Hermitian matrix representation of effect_superket, where 0 ≤ E ≤ 1.
+
+    This function returns the array representation (in `basis`) of the map that takes
+    a Hermitian matrix ρ to the Hermitian matrix E½ ρ E½.
+    """
+    effect_mat = _bt.vec_to_stdmx(effect_superket, basis, keep_complex=True)
+    vecs, vals, inv_vecs = _mt.eigendecomposition(effect_mat, assume_hermitian=True)
+
+    msg = f'Eigenvalues {vals} fall outside [0.0, 1.0], up to tolerance %s.'
+    if _np.any(vals < 0.0 - abstol_error) or _np.any(vals > 1.0 + abstol_error):
+        raise ValueError( msg % abstol_error )
+    if _np.any(vals < 0.0 - abstol_warn) or _np.any(vals > 1.0 + abstol_warn ):
+        _warnings.warn( msg % abstol_warn )
+    vals = _np.clip(vals, a_min=0.0, a_max=1.0)
+
+    rooteffect_mat = (vecs * _np.sqrt(vals)[_np.newaxis, :]) @ inv_vecs
+    mx_std = _np.kron(rooteffect_mat, rooteffect_mat.T)
+    mx : _np.ndarray = _bt.change_basis(mx_std, 'std', basis, expect_real=True)  # type: ignore
+    return mx
+
+
+def partial_trace(mx: _np.ndarray, dims: tuple[int,...], axis: int) -> _np.ndarray:
+    """
+
+    Notes
+    -----
+    This implementation is stolen from CVXPY, which was stolen from some Julia library.
+    """
+    if mx.ndim < 2 or mx.shape[0] != mx.shape[1]:
+        raise ValueError("partial_trace only supports 2-d square arrays.")
+    if axis < 0 or axis >= len(dims):
+        msg = f"Invalid axis argument, should be between 0 and {len(dims)}, got {axis}."
+        raise ValueError(msg)
+    if mx.shape[0] != _np.prod(dims):
+        raise ValueError("Dimension of system doesn't correspond to dimension of subsystems.")
+
+    def _term(j: int) -> _np.ndarray:
+        a = _sps.coo_matrix(([1.0], ([0], [0])))
+        b = _sps.coo_matrix(([1.0], ([0], [0])))
+        for (i_axis, dim) in enumerate(dims):
+            if i_axis == axis:
+                v = _sps.coo_matrix(([1], ([j], [0])), shape=(dim, 1))
+                a = _sps.kron(a, v.T)
+                b = _sps.kron(b, v)
+            else:
+                eye_mat = _sps.eye_array(dim)
+                a = _sps.kron(a, eye_mat)
+                b = _sps.kron(b, eye_mat)
+        return a @ mx @ b
+
+    return _np.sum([_term(j) for j in range(dims[axis])], axis=0)
+
+
+def trace_effect(op: _np.ndarray, op_basis: BasisLike, on_space: SpaceT = 'HilbertSchmidt'):
+    """
+    Let `op` be the array representation of a superoperator G in `op_basis`,
+    where G maps from and to the space of order-d Hermitian operators.
+
+    The trace effect of G is the Hermitian operator E that satisfies
+
+    trace(G(ρ)) = trace(E ρ) for all order-d Hermitian matrices ρ.
+
+    If on_space='HilbertSchmidt', then this function returns a superket representation
+    of E in `op_basis`. If on_space='Hilbert', then we return E itself.
+    """
+    d = int(_np.round(op.size ** 0.25))
+    assert op.shape == (d**2, d**2)
+    basis = op_basis if isinstance(op_basis, _Basis) else _Basis.cast(op_basis, dim=d**2)
+    vecI = _bt.stdmx_to_vec(_np.eye(d), basis)
+    vecE = op.T.conj() @ vecI
+    if on_space == 'HilbertSchmidt':
+        return vecE
+    else:
+        E = _bt.vec_to_stdmx(vecE, op_basis)
+        return E
+
+
+def minimal_kraus_decomposition(op_x: _np.ndarray, op_basis: _Basis, error_tol:float=1e-6, trunc_tol:float=1e-7) -> list[_np.ndarray]:
+    """
+    The array `op_x` represents a completely positive superoperator X on
+    Hilbert-Schmidt space, using `op_basis` as the basis for that space.
+
+    A Kraus decomposition of X is a set of square matrices, {K_i}_i, that satisfy
+
+    X(ρ) = \\sum_i K_i ρ K_i^\\dagger.
+
+    The matrices appearing in any such set are called Kraus operators of X.
+
+    This function returns a minimal-length list of Kraus operators of X.
+    """
+    d = int(_np.round(op_x.size ** 0.25))
+    d2 = d**2
+    assert op_x.shape == (d2, d2)
+    from pygsti.tools.jamiolkowski import jamiolkowski_iso
+    choi_mx : _np.ndarray = jamiolkowski_iso(op_x, op_basis, 'std', normalized=True) * d  # type: ignore
+
+    evecs, evals, _ = _mt.eigendecomposition(choi_mx, assume_hermitian=True)
+    if any([ev < -error_tol for ev in evals]):
+        raise ValueError("Cannot compute Kraus decomposition of non-positive-definite superoperator!")
+    keep = evals >= trunc_tol
+    evals = evals[keep]
+    evecs = evecs[:, keep]
+    out = []
+    for i, ev in enumerate(evals):
+        temp = _np.sqrt(ev) * evecs[:, i].reshape((d, d), order='F')
+        out.append(temp)
+    return out
+
+
 def average_gate_fidelity(a, b, mx_basis='pp', is_tp=None, is_unitary=None):
     """
     Computes the average gate fidelity (AGF) between two gates.

From c1d2be0a532419538a93232202f25e0ea1995b64 Mon Sep 17 00:00:00 2001
From: Riley Murray
Date: Wed, 3 Dec 2025 08:04:35 -0800
Subject: [PATCH 06/10] add LinearOperator._kraus_operators function with
 default implementation

---
 pygsti/modelmembers/operations/linearop.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/pygsti/modelmembers/operations/linearop.py b/pygsti/modelmembers/operations/linearop.py
index 6edd84459..a1bc6695b 100644
--- a/pygsti/modelmembers/operations/linearop.py
+++ b/pygsti/modelmembers/operations/linearop.py
@@ -14,6 +14,7 @@
 
 import numpy as _np
 
+from pygsti.baseobjs.basis import Basis as _Basis
 from pygsti.baseobjs.opcalc import bulk_eval_compact_polynomials_complex as _bulk_eval_compact_polynomials_complex
 from pygsti.modelmembers import modelmember as _modelmember
 from pygsti.tools import optools as _ot
@@ -103,6 +104,16 @@ def shape(self) -> tuple[int, int]:
         # are given broad freedom to define semantics of self._rep.
         return (self.dim, self.dim)
 
+    def _kraus_operators(self):
+        """A list of this operation's Kraus operators as numpy arrays."""
+        basis = getattr(self, '_basis', None)
+        if not isinstance(basis, _Basis):
+            msg = "Kraus operator functionality requires specifying a superoperator basis"
+            raise NotImplementedError(msg)
+        mx = self.to_dense('HilbertSchmidt')
+        kops = _ot.minimal_kraus_decomposition(mx, basis, 1e-7, 1e-7)
+        return kops
+
     def set_dense(self, m):
         """
         Set the dense-matrix value of this operation.
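The Kraus-operator convention used in patches 05 and 06 (Choi-matrix eigendecomposition with column-major reshaping) can be sanity-checked without pyGSTi. The sketch below is illustrative only: the amplitude-damping channel, the test density matrix, and the tolerance are arbitrary choices, and only plain numpy is used.

```python
import numpy as np

# Kraus operators of a single-qubit amplitude-damping channel (illustrative choice).
g = 0.3
kraus_in = [np.array([[1.0, 0.0], [0.0, np.sqrt(1 - g)]]),
            np.array([[0.0, np.sqrt(g)], [0.0, 0.0]])]
d = 2

def vec(M):
    # Column-major ("Fortran"-order) vectorization, matching the order='F'
    # reshape used in minimal_kraus_decomposition.
    return M.reshape(-1, 1, order='F')

# Unnormalized Choi matrix: J = sum_i vec(K_i) vec(K_i)^dag.
J = sum(vec(K) @ vec(K).conj().T for K in kraus_in)

# Re-extract a minimal Kraus set from J's eigendecomposition, keeping only
# eigenvalues above a truncation tolerance (the role of trunc_tol above).
evals, evecs = np.linalg.eigh(J)
kraus_out = [np.sqrt(ev) * evecs[:, i].reshape((d, d), order='F')
             for i, ev in enumerate(evals) if ev > 1e-10]

# Both Kraus sets implement the same channel rho -> sum_i K_i rho K_i^dag.
rho = np.array([[0.7, 0.2 - 0.1j], [0.2 + 0.1j, 0.3]])
out_in = sum(K @ rho @ K.conj().T for K in kraus_in)
out_out = sum(K @ rho @ K.conj().T for K in kraus_out)
assert np.allclose(out_in, out_out)
```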
From 172a35e67b94031c543eff987f0abee51f266006 Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 08:09:01 -0800 Subject: [PATCH 07/10] address warning from not specifying default kwarg in np.linalg.lstsq --- pygsti/modelmembers/povms/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/modelmembers/povms/__init__.py b/pygsti/modelmembers/povms/__init__.py index d798b67a2..7dc36ebf6 100644 --- a/pygsti/modelmembers/povms/__init__.py +++ b/pygsti/modelmembers/povms/__init__.py @@ -612,7 +612,7 @@ def _objfn(v): tol=1e-13) if not soln.success and soln.fun > 1e-6: # not "or" because success is often not set correctly raise ValueError("Failed to find an errorgen such that Date: Wed, 3 Dec 2025 08:15:52 -0800 Subject: [PATCH 08/10] change Instrument.acton so it no longer requires a TP basis; add superket_trace to optools.py --- pygsti/modelmembers/instruments/instrument.py | 14 +++++++++----- pygsti/tools/optools.py | 9 +++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/pygsti/modelmembers/instruments/instrument.py b/pygsti/modelmembers/instruments/instrument.py index 2e0a5a3a0..38e01ba5c 100644 --- a/pygsti/modelmembers/instruments/instrument.py +++ b/pygsti/modelmembers/instruments/instrument.py @@ -19,6 +19,9 @@ from pygsti.baseobjs import statespace as _statespace from pygsti.tools import matrixtools as _mt from pygsti.tools import slicetools as _slct +from pygsti.tools import basistools as _bt +from pygsti.tools import optools as _ot +from pygsti.baseobjs.basis import Basis as _Basis from pygsti.baseobjs.label import Label as _Label from pygsti.baseobjs.statespace import StateSpace as _StateSpace @@ -378,13 +381,14 @@ def acton(self, state): staterep = state._rep outcome_probs_and_states = _collections.OrderedDict() + for lbl, element in self.items(): output_rep = element._rep.acton(staterep) - output_unnormalized_state = output_rep.to_dense() - prob = output_unnormalized_state[0] * state.dim**0.25 - output_normalized_state = output_unnormalized_state / prob # so [0]th == 1/state_dim**0.25 - outcome_probs_and_states[lbl] = (prob, _state.StaticState(output_normalized_state, self.evotype, - self.state_space)) + unnormalized_state_array = output_rep.to_dense() + prob = _ot.superket_trace(unnormalized_state_array, element.basis) + output_state_array = unnormalized_state_array / prob + output_state = _state.StaticState(output_state_array, self.evotype, self.state_space) + outcome_probs_and_states[lbl] = (prob, output_state) return outcome_probs_and_states diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index b77c81c8c..84a8dcd2f 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -462,6 +462,15 @@ def is_trace_preserving(a: _np.ndarray, mx_basis: BasisLike='pp', tol=1e-8) -> b return check +def superket_trace(superket: _np.ndarray, basis: _Basis): + if basis.first_element_is_identity: + t = superket.ravel()[0] + else: + mx = _bt.vec_to_stdmx(superket, basis, keep_complex=True) + t = _np.real(_np.trace(mx)) + return t + + def entanglement_fidelity(a, b, mx_basis: BasisLike='pp', is_tp=None, is_unitary=None): """ Returns the "entanglement" process fidelity between gate matrices. 
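Patch 08 replaces the old renormalization in Instrument.acton (which read off the first superket component and multiplied by dim**0.25, implicitly assuming the basis' first element is proportional to the identity) with superket_trace. The sketch below uses plain numpy with a locally constructed normalized Pauli basis — nothing about pyGSTi's Basis API is assumed — to show both routes from a superket to Tr(ρ).

```python
import numpy as np

# Single-qubit example: expand rho in the normalized Pauli basis {I, X, Y, Z}/sqrt(2),
# which is orthonormal under the Hilbert-Schmidt inner product <A, B> = Tr(A^dag B).
d = 2
I2 = np.eye(2)
X = np.array([[0.0, 1.0], [1.0, 0.0]])
Y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
Z = np.diag([1.0, -1.0])
basis_mxs = [B / np.sqrt(d) for B in (I2, X, Y, Z)]

rho = np.array([[0.7, 0.2 - 0.1j], [0.2 + 0.1j, 0.3]])
superket = np.array([np.trace(B.conj().T @ rho).real for B in basis_mxs])

# General route (what superket_trace's fallback branch does): rebuild the matrix
# from its expansion coefficients and take the trace.
rho_rebuilt = sum(c * B for c, B in zip(superket, basis_mxs))
tr_general = np.trace(rho_rebuilt).real

# Shortcut used by the old acton code: when the first basis element is I/sqrt(d),
# the trace is sqrt(d) times the first superket component.
tr_shortcut = np.sqrt(d) * superket[0]

assert np.isclose(tr_general, np.trace(rho).real)
assert np.isclose(tr_shortcut, np.trace(rho).real)
```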
From fe63e73948ab7c493293379511dfc96a3c164c1a Mon Sep 17 00:00:00 2001 From: Riley Murray Date: Wed, 3 Dec 2025 08:17:33 -0800 Subject: [PATCH 09/10] CPTPLND instruments --- pygsti/modelmembers/instruments/__init__.py | 139 ++++++++++++---- .../instruments/cptpinstrument.py | 150 ++++++++++++++++++ 2 files changed, 260 insertions(+), 29 deletions(-) create mode 100644 pygsti/modelmembers/instruments/cptpinstrument.py diff --git a/pygsti/modelmembers/instruments/__init__.py b/pygsti/modelmembers/instruments/__init__.py index 8ef2c0ed2..c2f454a47 100644 --- a/pygsti/modelmembers/instruments/__init__.py +++ b/pygsti/modelmembers/instruments/__init__.py @@ -13,8 +13,17 @@ from .instrument import Instrument from .tpinstrument import TPInstrument from .tpinstrumentop import TPInstrumentOp - -from pygsti.tools import optools as _ot +from .cptpinstrument import RootConjOperator, SummedOperator + +import scipy.linalg as _la +import numpy as _np +from pygsti.tools import optools as _ot, basistools as _bt +from types import NoneType +from pygsti.baseobjs.label import Label +from pygsti.baseobjs.basis import Basis +from pygsti.modelmembers import operations as _op +from pygsti.modelmembers import povms as _pv +from pygsti.modelmembers.povms.basepovm import _BasePOVM # Avoid circular import import pygsti.modelmembers as _mm @@ -37,7 +46,6 @@ def instrument_type_from_op_type(op_type): # Limited set (only matching what is in convert) instr_conversion = { - 'auto': 'full', 'static unitary': 'static unitary', 'static clifford': 'static clifford', 'static': 'static', @@ -45,16 +53,12 @@ def instrument_type_from_op_type(op_type): 'full TP': 'full TP', 'full CPTP': 'full CPTP', 'full unitary': 'full unitary', + 'CPTPLND': 'CPTPLND' } instr_type_preferences = [] for typ in op_type_preferences: - instr_type = None - if _ot.is_valid_lindblad_paramtype(typ): - # Lindblad types are passed through as TP only (matching current convert logic) - instr_type = "full TP" - else: - instr_type = instr_conversion.get(typ, None) + instr_type = instr_conversion.get(typ, None) if instr_type is None: continue @@ -110,26 +114,103 @@ def convert(instrument, to_type, basis, ideal_instrument=None, flatten_structure The converted instrument, usually a distinct object from the object passed as input. 
""" - to_types = to_type if isinstance(to_type, (tuple, list)) else (to_type,) # HACK to support multiple to_type values + if not isinstance(to_type, str): + if len(to_type) > 1: + raise ValueError(f"Expected to_type to be a string, but got {to_type}") + to_type = to_type[0] + assert isinstance(to_type, str) + destination_types = {'full TP': TPInstrument} - NoneType = type(None) - - for to_type in to_types: - try: - if isinstance(instrument, destination_types.get(to_type, NoneType)): - return instrument - - if to_type == "full TP": - return TPInstrument(list(instrument.items()), instrument.evotype, instrument.state_space) - elif to_type in ("full", "static", "static unitary"): - from ..operations import convert as _op_convert - ideal_items = dict(ideal_instrument.items()) if (ideal_instrument is not None) else {} - members = [(k, _op_convert(g, to_type, basis, ideal_items.get(k, None), flatten_structure)) - for k, g in instrument.items()] - return Instrument(members, instrument.evotype, instrument.state_space) + + if isinstance(instrument, destination_types.get( to_type, NoneType ) ): + return instrument + + if to_type == "full TP": + inst_arrays = dict() + for k, v in instrument.items(): + if hasattr(v, 'to_dense'): + inst_arrays[k] = v.to_dense('HilbertSchmidt') else: - raise ValueError("Cannot convert an instrument to type %s" % to_type) - except: - pass # try next to_type + inst_arrays[k] = v + return TPInstrument(list(inst_arrays.items()), instrument.evotype, instrument.state_space) + + if to_type in ("full", "static", "static unitary"): + from ..operations import convert as _op_convert + ideal_items = dict(ideal_instrument.items()) if (ideal_instrument is not None) else {} + members = [(k, _op_convert(g, to_type, basis, ideal_items.get(k, None), flatten_structure)) + for k, g in instrument.items()] + return Instrument(members, instrument.evotype, instrument.state_space) + + if to_type == 'CPTPLND': + op_arrays = {k: v.to_dense('HilbertSchmidt') for (k,v) in instrument.items()} + inst = cptp_instrument(op_arrays, basis) + return inst + + raise ValueError("Cannot convert an instrument to type %s" % to_type) + + +def cptp_instrument(op_arrays: dict[str, _np.ndarray], basis: Basis, error_tol:float=1e-6, trunc_tol:float=1e-7, povm_errormap=None) -> Instrument: + unitaries = dict() + effects = dict() + + # Step 1. Build CPTPLND-parameterized unitaries and static POVM effects + # from Kraus decompositions of the provided operators. + for oplbl, op in op_arrays.items(): + krausops = _ot.minimal_kraus_decomposition(op, basis, error_tol, trunc_tol) + cur_effects = [] + cur_unitaries = [] + for K in krausops: + # Define F(rho) := K rho K^\\dagger. If we compute a polar decomposition + # K = u p, then we can express F(rho) = u (p rho p) u^\\dagger, which is + # a composition of a unitary channel and the "RootConjOperator" of p@p. + u, p = _la.polar(K, side='right') + u_linop = _op.StaticUnitaryOp(u, basis) # type: ignore + u_cptp = _op.convert(u_linop, 'CPTPLND', basis) + cur_unitaries.append(u_cptp) + E_superket = _bt.stdmx_to_vec(p @ p, basis) + E = _pv.StaticPOVMEffect(E_superket) + cur_effects.append(E) + effects[oplbl] = cur_effects + unitaries[oplbl] = cur_unitaries + + # Step 2. Build a CPTPLND-parameterized POVM from the static POVM effects. + # + # We can't use povms.convert(...) because we might have more + # effects than the Hilbert space dimension. 
+ # + base_povm_effects = {} + for oplbl, cur_effects in effects.items(): + for i, E in enumerate(cur_effects): + elbl = Label((oplbl, i)) + base_povm_effects[elbl] = E + base_povm = _BasePOVM(base_povm_effects, preserve_sum=False) + udim = int(_np.round(basis.dim ** 0.5)) + if povm_errormap is None: + I_ideal = _op.StaticUnitaryOp(_np.eye(udim), basis) # type: ignore + I_cptplnd = _op.convert(I_ideal, 'CPTPLND', basis) + povm_errormap = I_cptplnd.factorops[1] + povm_cptp = _pv.ComposedPOVM(povm_errormap, base_povm) + inst_ops = dict() + + # Step 3. Assemble the CPTPLND-parameterized unitaries and POVM + # effects to represent each operator as a completely-positive + # trace-reducing map. + # + for lbl in op_arrays: + cur_effects = effects[lbl] + cur_unitaries = unitaries[lbl] + + op_summands = [] + for i, U_cptplnd in enumerate(cur_unitaries): + E_cptp = povm_cptp[Label((lbl, i))] + rcop = RootConjOperator(E_cptp, basis) + cptr = _op.ComposedOp([rcop, U_cptplnd]) + op_summands.append(cptr) + if len(op_summands) == 1: + op = op_summands[0] + else: + op = SummedOperator(op_summands, basis) - raise ValueError("Could not convert instrument to to type(s): %s" % str(to_types)) + inst_ops[lbl] = op + inst = Instrument(inst_ops) + return inst diff --git a/pygsti/modelmembers/instruments/cptpinstrument.py b/pygsti/modelmembers/instruments/cptpinstrument.py new file mode 100644 index 000000000..ba6ea5d7c --- /dev/null +++ b/pygsti/modelmembers/instruments/cptpinstrument.py @@ -0,0 +1,150 @@ +""" +Defines the CPTPInstrument class +""" +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** + +import numpy as _np +from pygsti.pgtypes import SpaceT +from pygsti.baseobjs.basis import Basis +from pygsti.modelmembers.povms.effect import POVMEffect +from pygsti.modelmembers.operations import LinearOperator +from pygsti.tools import optools as _ot + +from typing import Union +BasisLike = Union[Basis, str] + + +class RootConjOperator(LinearOperator): + """ + A linear operator parameterized by a matrix E where 0 ≤ E ≤ 1, whose action takes ρ to E½ ρ E½ . + + Every CPTR map can be obtained by pre-composing a CPTP map with this kind of linear operator. + """ + + EIGTOL_WARNING = 1e-10 + EIGTOL_ERROR = 1e-8 + + def __init__(self, effect: POVMEffect, basis: BasisLike): + self._basis = Basis.cast(basis, effect.dim) + self._effect = effect + self._state_space = effect.state_space + self._evotype = effect.evotype + + dim = self._state_space.dim + self._rep = self._evotype.create_dense_superop_rep( _np.zeros((dim, dim)), self._basis, self._state_space ) + self._update_rep_base() + LinearOperator.__init__(self, self._rep, self._evotype) + self.init_gpindices() + + def submembers(self): + return [self._effect] + + def _update_rep_base(self): + # This function is directly analogous to TPInstrumentOp._construct_matrix. 
+ self._rep.base.flags.writeable = True + assert(self._rep.base.shape == (self.dim, self.dim)) + effect_superket = self._effect.to_dense() + mx = _ot.rootconj_superop(effect_superket, self._basis) + self._rep.base[:] = mx + self._rep.base.flags.writeable = False + self._rep.base_has_changed() + return + + def deriv_wrt_params(self, wrt_filter=None): + raise NotImplementedError() + + def has_nonzero_hessian(self): + # This is not affine in its parameters. + return True + + def from_vector(self, v, close=False, dirty_value=True): + for sm, local_inds in zip(self.submembers(), self._submember_rpindices): + sm.from_vector(v[local_inds], close, dirty_value) + self._update_rep_base() + return + + @property + def num_params(self): + return len(self.gpindices_as_array()) + + def to_vector(self): + v = _np.empty(self.num_params, 'd') + for param_op, local_inds in zip(self.submembers(), self._submember_rpindices): + v[local_inds] = param_op.to_vector() + return v + + def to_dense(self, on_space: SpaceT = 'HilbertSchmidt') -> _np.ndarray: + assert on_space in ('HilbertSchmidt', 'minimal') + out = self._rep.base.copy() + out.flags.writeable = True + return out + + +class SummedOperator(LinearOperator): + + + def __init__(self, operators, basis: BasisLike): + op = operators[0] + self._basis = Basis.cast(basis, op.dim) + self._operators = operators + self._state_space = op.state_space + self._evotype = op.evotype + self._subreps = [op._rep for op in self._operators] + self._rep = self._evotype.create_sum_rep( self._subreps, self._state_space ) + LinearOperator.__init__(self, self._rep, self._evotype) + self.init_gpindices() + # NOTE: This class doesn't have a function analogous to _update_rep_base + # that we use in RootConjOperator. We can get away with not having such + # a function because it's the responsibility of op.from_vector(...) + # to update op's attached OpRep. 
return
+
+    def submembers(self):
+        out = []
+        hit = set()
+        for op in self._operators:
+            temp = op.submembers()
+            for sm in temp:
+                if id(sm) not in hit:
+                    hit.add(id(sm))
+                    out.append(sm)
+        return out
+
+    def deriv_wrt_params(self, wrt_filter=None):
+        raise NotImplementedError()
+
+    def has_nonzero_hessian(self):
+        return any(op.has_nonzero_hessian() for op in self._operators)
+
+    def from_vector(self, v, close=False, dirty_value=True):
+        for sm, local_inds in zip(self.submembers(), self._submember_rpindices):
+            sm.from_vector(v[local_inds], close, dirty_value)
+        return
+
+    @property
+    def num_params(self):
+        return len(self.gpindices_as_array())
+
+    def to_vector(self):
+        v = _np.empty(self.num_params, 'd')
+        for param_op, local_inds in zip(self.submembers(), self._submember_rpindices):
+            v[local_inds] = param_op.to_vector()
+        return v
+
+    def to_dense(self, on_space: SpaceT = 'HilbertSchmidt') -> _np.ndarray:
+        assert on_space in ('HilbertSchmidt', 'minimal')
+        on_space = 'HilbertSchmidt'
+        out = self._operators[0].to_dense(on_space)
+        if not out.flags.writeable:
+            out = out.copy()
+        for op in self._operators[1:]:
+            temp = op.to_dense(on_space)
+            out += temp
+        return out

From ab3d7f3a8598bd9c1bf9373f8db2f1f2b5097110 Mon Sep 17 00:00:00 2001
From: Riley Murray
Date: Wed, 3 Dec 2025 08:49:37 -0800
Subject: [PATCH 10/10] update Instruments.ipynb advanced example notebook

---
 .../objects/advanced/Instruments.ipynb      | 335 ++++++++++--------
 pygsti/modelmembers/instruments/__init__.py |   2 +-
 2 files changed, 197 insertions(+), 140 deletions(-)

diff --git a/jupyter_notebooks/Tutorials/objects/advanced/Instruments.ipynb b/jupyter_notebooks/Tutorials/objects/advanced/Instruments.ipynb
index 3e4093b39..989bf4acb 100644
--- a/jupyter_notebooks/Tutorials/objects/advanced/Instruments.ipynb
+++ b/jupyter_notebooks/Tutorials/objects/advanced/Instruments.ipynb
@@ -12,13 +12,15 @@
    },
    {
     "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
     "import pygsti\n",
     "from pygsti.modelpacks import smq1Q_XYI as std\n",
-    "import numpy as np"
+    "import numpy as np\n",
+    "from pygsti.modelmembers.instruments import Instrument\n",
+    "from pprint import pprint"
    ]
   },
   {
@@ -31,58 +33,40 @@
    },
    {
     "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 13,
    "metadata": {},
    "outputs": [],
    "source": [
     "#Make a copy so we don't modify the original\n",
-    "target_model = std.target_model()\n",
+    "mdl_ideal = std.target_model()\n",
     "\n",
-    "#Create and add the ideal instrument\n",
-    "E0 = target_model.effects['0']\n",
-    "E1 = target_model.effects['1']\n",
-    " # Alternate indexing that uses POVM label explicitly\n",
-    " # E0 = target_model['Mdefault']['0'] # 'Mdefault' = POVM label, '0' = effect label\n",
-    " # E1 = target_model['Mdefault']['1']\n",
-    "Gmz_plus = np.dot(E0,E0.T) #note effect vectors are stored as column vectors\n",
+    "# Create and add the ideal instrument\n",
+    "E0 = mdl_ideal.effects['0']\n",
+    "E1 = mdl_ideal.effects['1']\n",
+    "Gmz_plus = np.dot(E0,E0.T) # note effect vectors are stored as column vectors\n",
     "Gmz_minus = np.dot(E1,E1.T)\n",
-    "target_model[('Iz',0)] = pygsti.modelmembers.instruments.Instrument({'p0': Gmz_plus, 'p1': Gmz_minus})\n",
-    "\n",
-    "#For later use, record the identity POVM vector\n",
-    "povm_ident = target_model.effects['0'] + target_model.effects['1'] "
+    "inst = Instrument({'p0': Gmz_plus, 'p1': Gmz_minus})\n",
+    "mdl_ideal[('Iz',0)] = inst"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In order to generate some simulated data later on, we'll now create a noisy version of `target_model` by depolarizing the state preparation, gates, and POVM, and also rotating the basis that is measured by the instrument and POVM."
+    
"In order to generate some simulated data later on, we'll now create a noisy version of `target_model` by depolarizing the state preparation, gates, and POVM, and also rotating the basis that is measured by the instrument and POVM." + "In order to generate some simulated data later on, we'll now create a noisy version of `mdl_ideal` by depolarizing the state preparation, gates, and POVM, and also rotating the basis that is measured by the instrument and POVM." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ - "mdl_noisy = target_model.depolarize(op_noise=0.01, spam_noise=0.01)\n", - "mdl_noisy.effects.depolarize(0.01) #because above call only depolarizes the state prep, not the POVMs\n", - "\n", - "# add a rotation error to the POVM\n", - "Uerr = pygsti.rotation_gate_mx([0,0.02,0])\n", - "E0 = np.dot(mdl_noisy['Mdefault']['0'].T,Uerr).T\n", - "E1 = povm_ident - E0\n", - "mdl_noisy.povms['Mdefault'] = pygsti.modelmembers.povms.UnconstrainedPOVM({'0': E0, '1': E1},\n", - " evotype='default')\n", - "\n", - "# Use the same rotated effect vectors to \"rotate\" the instrument Iz too\n", - "E0 = mdl_noisy.effects['0']\n", - "E1 = mdl_noisy.effects['1']\n", - "Gmz_plus = np.dot(E0,E0.T)\n", - "Gmz_minus = np.dot(E1,E1.T)\n", - "mdl_noisy[('Iz',0)] = pygsti.modelmembers.instruments.Instrument({'p0': Gmz_plus, 'p1': Gmz_minus})\n", - "\n", - "#print(mdl_noisy) #print the model" + "mdl_noisy = mdl_ideal.depolarize(op_noise=0.005, spam_noise=0.01)\n", + "mdl_noisy = mdl_noisy.rotate(max_rotate=0.025, seed=2048)\n", + "mdl_noisy.effects.depolarize(0.01)\n", + "mdl_ideal.convert_members_inplace('CPTPLND')\n", + "mdl_noisy.convert_members_inplace('CPTPLND')" ] }, { @@ -95,20 +79,66 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{('p0', '0'): 0.5000000000000003,\n", + " ('p0', '1'): 1.4597476004180728e-17,\n", + " ('p1', '0'): 5.073268178784763e-18,\n", + " ('p1', '1'): 0.5}\n", + "\n", + "{('p0', '0'): 0.481367923790963,\n", + " ('p0', '1'): 0.0024189342904068554,\n", + " ('p1', '0'): 0.0025810657095932145,\n", + " ('p1', '1'): 0.5136320762090365}\n", + "\n" + ] + } + ], "source": [ - "dict(target_model.probabilities( pygsti.circuits.Circuit(( ('Gxpi2',0) , ('Iz',0) )) ))" + "for mdl in [mdl_ideal, mdl_noisy]:\n", + " pprint(dict(mdl.probabilities( pygsti.circuits.Circuit(( ('Gxpi2',0) , ('Iz',0) )) )))\n", + " print()" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{('p0', 'p0', '0'): 0.5000000000000002,\n", + " ('p0', 'p0', '1'): -1.6613637599827203e-18,\n", + " ('p0', 'p1', '0'): -1.1185571585378685e-17,\n", + " ('p0', 'p1', '1'): 0.4999999999999999,\n", + " ('p1', 'p0', '0'): 1.625883976416346e-17,\n", + " ('p1', 'p0', '1'): 1.8939874217871704e-34,\n", + " ('p1', 'p1', '0'): 4.5374861265545935e-34,\n", + " ('p1', 'p1', '1'): 1.625883976416347e-17}\n", + "\n", + "{('p0', 'p0', '0'): 0.4775917799207901,\n", + " ('p0', 'p0', '1'): 0.002399958693069308,\n", + " ('p0', 'p1', '0'): 0.0025624981205110728,\n", + " ('p0', 'p1', '1'): 0.5099371259816915,\n", + " ('p1', 'p0', '0'): 0.0038579004920827257,\n", + " ('p1', 'p0', '1'): 1.938643463358172e-05,\n", + " ('p1', 'p1', '0'): 1.8156751786106926e-05,\n", + " ('p1', 'p1', '1'): 
0.003613193605435256}\n", + "\n" + ] + } + ], "source": [ - "dict(target_model.probabilities( pygsti.circuits.Circuit(( ('Iz',0), ('Gxpi2',0) , ('Iz',0) )) ))" + "for mdl in [mdl_ideal, mdl_noisy]:\n", + " pprint(dict(mdl.probabilities( pygsti.circuits.Circuit(( ('Iz',0), ('Gxpi2',0) , ('Iz',0) )) )))\n", + " print()" ] }, { @@ -120,11 +150,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "probs = {('0',): 0.5000000000000004, ('1',): 0.5}\n", + "probs['0'] = 0.5000000000000004\n", + "probs[('0',)] = 0.5000000000000004\n" + ] + } + ], "source": [ - "probs = target_model.probabilities( pygsti.circuits.Circuit([('Gxpi2',0)]) )\n", + "probs = mdl_ideal.probabilities( pygsti.circuits.Circuit([('Gxpi2',0)]) )\n", "print(\"probs = \",dict(probs))\n", "print(\"probs['0'] = \", probs['0']) #This works...\n", "print(\"probs[('0',)] = \", probs[('0',)]) # and so does this." @@ -135,158 +175,175 @@ "metadata": {}, "source": [ "## Performing tomography\n", - "\n", - "### Simulated data generation\n", - "Now let's perform tomography on a model that includes instruments. First, we'll generate some data using `mdl_noisy` in exactly the same way as we would for any other model:" + "Now let's perform tomography on a model that includes instruments. \n", + "First, we'll build an experiment design. This notebook's experiment design makes a minor modification to the default design for our working modelpack (smq1Q_XYI). " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "germs = std.germs()\n", - "germs += [pygsti.circuits.Circuit([('Iz', 0)])] # add the instrument as a germ.\n", - "\n", - "prep_fiducials = std.prep_fiducials()\n", - "meas_fiducials = std.meas_fiducials()\n", - "max_lengths = [1] # keep it simple & fast\n", - "\n", - "lsgst_list = pygsti.circuits.create_lsgst_circuits(\n", - " mdl_noisy,prep_fiducials,meas_fiducials,germs,max_lengths)\n", - "\n", - "#print(\"LinearOperator sequences:\")\n", - "#print(lsgst_list) #note that this contains LGST strings with \"Iz\"\n", - "\n", - "#Create the DataSet\n", - "ds = pygsti.data.simulate_data(mdl_noisy,lsgst_list,1000,'multinomial',seed=2018)\n", - "\n", - "#Write it to a text file to demonstrate the format:\n", - "pygsti.io.write_dataset(\"../../tutorial_files/intermediate_meas_dataset.txt\",ds)" + "germs += [pygsti.circuits.Circuit([('Iz', 0)])]\n", + "ed = std.create_gst_experiment_design(max_max_length=8, germs=germs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Notice the format of [intermediate_meas_dataset.txt](../../tutorial_files/intermediate_meas_dataset.txt), which includes a column for each distinct outcome tuple. Since not all experiments contain data for all outcome tuples, the `\"--\"` is used as a placeholder. Now that the data is generated, we run LGST or LSGST just like we would for any other model:\n", - "\n", - "### LGST" + "### Simulated data generation\n", + "Next, we generate data using `mdl_noisy` in exactly the same way as we would for any other model. We write the data to disk so you can see how datasets look when they include measurement data." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ - "#Run LGST\n", - "mdl_lgst = pygsti.run_lgst(ds, prep_fiducials, meas_fiducials, target_model)\n", - "#print(mdl_lgst)\n", - "\n", - "#Gauge optimize the result to the true data-generating model (mdl_noisy),\n", - "# and compare. Mismatch is due to finite sample noise.\n", - "mdl_lgst_opt = pygsti.gaugeopt_to_target(mdl_lgst,mdl_noisy)\n", - "print(mdl_noisy.strdiff(mdl_lgst_opt))\n", - "print(\"Frobdiff after GOpt = \",mdl_noisy.frobeniusdist(mdl_lgst_opt))" + "ds = pygsti.data.simulate_data(mdl_noisy, ed.all_circuits_needing_data, 10_000, 'multinomial', seed=2018)\n", + "pygsti.io.write_dataset(\"../../tutorial_files/intermediate_meas_dataset.txt\", ds)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Long-sequence GST\n", - "Instruments just add parameters to a `Model` like gates, state preparations, and POVMs do. The total number of parameters in our model is \n", + "Notice the format of [intermediate_meas_dataset.txt](../../tutorial_files/intermediate_meas_dataset.txt), which includes a column for each distinct outcome tuple. Since not all experiments contain data for all outcome tuples, the `\"--\"` is used as a placeholder. Now that the data is generated, we run LGST or LSGST just like we would for any other model:\n", "\n", - "$4$ (prep) + $2\\times 4$ (2 effects) + $5\\times 16$ (3 gates and 2 instrument members) $ = 92$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "target_model.num_params" + "### Running the GST fit and generating a report" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "-- Std Practice: Iter 1 of 2 (full TP) --: \n", + " --- Iterative GST: [##################################################] 100.0% 592 circuits ---\n", + "-- Std Practice: Iter 2 of 2 (CPTPLND) --: \n", + " --- Iterative GST: [##################################################] 100.0% 592 circuits ---\n" + ] + } + ], "source": [ - "#Run long sequence GST\n", - "results = pygsti.run_long_sequence_gst(ds,target_model,prep_fiducials,meas_fiducials,germs,max_lengths)" + "from pygsti.protocols import StandardGST, ProtocolData\n", + "gst = StandardGST(\n", + " modes=('full TP', 'CPTPLND'), target_model=mdl_ideal, verbosity=2\n", + ")\n", + "pd = ProtocolData(ed, ds)\n", + "res = gst.run(pd)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running idle tomography\n", + "Computing switchable properties\n", + "Found standard clifford compilation from smq1Q_XYI\n", + "Found standard clifford compilation from smq1Q_XYI\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/models/model.py:145: UserWarning:\n", + "\n", + "Model.num_modeltest_params could not obtain number of *non-gauge* parameters - using total instead\n", + "\n", + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/tools/optools.py:152: UserWarning:\n", + "\n", + "The input matrix a is not trace-1 up to tolerance 1.4901161193847656e-08. 
Beware result!\n", + "\n", + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/tools/optools.py:154: UserWarning:\n", + "\n", + "The input matrix b is not trace-1 up to tolerance 1.4901161193847656e-08. Beware result!\n", + "\n", + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/tools/optools.py:179: UserWarning:\n", + "\n", + "\n", + " Input matrix is not PSD up to tolerance 1.4901161193847656e-08.\n", + " We'll project out the bad eigenspaces to only work with the PSD part.\n", + " \n", + "\n", + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/report/workspacetables.py:2741: RuntimeWarning:\n", + "\n", + "divide by zero encountered in log\n", + "\n", + "/Users/rjmurr/Documents/pygsti-general/pyGSTi/pygsti/forwardsims/mapforwardsim.py:732: UserWarning:\n", + "\n", + "Generating dense process matrix representations of circuits or gates \n", + "can be inefficient and should be avoided for the purposes of forward \n", + "simulation/calculation of circuit outcome probability distributions \n", + "when using the MapForwardSimulator.\n", + "\n" + ] + } + ], "source": [ - "#Compare estimated model (after gauge opt) to data-generating one\n", - "mdl_est = results.estimates['GateSetTomography'].models['go0']\n", - "mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est,mdl_noisy)\n", - "print(\"Frobdiff after GOpt = \", mdl_noisy.frobeniusdist(mdl_est_opt))" + "from pygsti.report import construct_standard_report\n", + "report_dir = '../../tutorial_files/cptp-instrument-report'\n", + "report_object = construct_standard_report(res, title='CPTP intrument GST')\n", + "report_object.write_html(report_dir)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The same analysis can be done for a trace-preserving model, whose instruments are constrained to *add* to a perfectly trace-preserving map. The number of parameters in the model are now: \n", + "**Thats it!** You've done tomography on a model with intermediate measurments (instruments).\n", "\n", - "$3$ (prep) + $1\\times 4$ (effect and complement) + $3\\times 12$ (3 gates) + $(2\\times 16 - 3)$ (TP instrument) $ = 71$" + "As a bonus, the code below checks for violation of complete positivity of the instrument operations from the two fits. We see that the CPTP fit has no violation, while the full TP fit has small violations." 
] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mdl_targetTP = target_model.copy()\n", - "mdl_targetTP.set_all_parameterizations(\"full TP\")\n", - "print(\"POVM type = \",type(mdl_targetTP[\"Mdefault\"]),\" Np=\",mdl_targetTP[\"Mdefault\"].num_params)\n", - "print(\"Instrument type = \",type(mdl_targetTP[(\"Iz\",0)]),\" Np=\",mdl_targetTP[(\"Iz\",0)].num_params)\n", - "print(\"Number of model parameters = \", mdl_targetTP.num_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resultsTP = pygsti.run_long_sequence_gst(ds,mdl_targetTP,prep_fiducials,meas_fiducials,germs,max_lengths)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Again compare estimated model (after gauge opt) to data-generating one\n", - "mdl_est = resultsTP.estimates['GateSetTomography'].models['go0']\n", - "mdl_est_opt = pygsti.gaugeopt_to_target(mdl_est,mdl_noisy)\n", - "print(\"Frobdiff after GOpt = \", mdl_noisy.frobeniusdist(mdl_est_opt))" - ] - }, - { - "cell_type": "markdown", + "execution_count": 22, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iz:0\n", + "p0: 0.0009697360237423556\n", + "p1: 2.6230206864367674e-05\n", + "\n", + "Iz:0\n", + "p0: 0\n", + "p1: 0\n", + "\n" + ] + } + ], "source": [ - "**Thats it!** You've done tomography on a model with intermediate measurments (instruments)." + "for mdl in [res.estimates['full TP'].models['stdgaugeopt'], res.estimates['CPTPLND'].models['stdgaugeopt']]:\n", + " for instlbl, inst in mdl.instruments.items():\n", + " print(instlbl)\n", + " for ioplbl, iop in inst.items():\n", + " violation = max(0, pygsti.tools.sum_of_negative_choi_eigenvalues_gate(iop.to_dense(), 'pp'))\n", + " print(ioplbl + ': ' + str(violation) )\n", + " print()" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "pgdev311", "language": "python", "name": "python3" }, @@ -300,7 +357,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.11.13" } }, "nbformat": 4, diff --git a/pygsti/modelmembers/instruments/__init__.py b/pygsti/modelmembers/instruments/__init__.py index c2f454a47..9864f0987 100644 --- a/pygsti/modelmembers/instruments/__init__.py +++ b/pygsti/modelmembers/instruments/__init__.py @@ -190,12 +190,12 @@ def cptp_instrument(op_arrays: dict[str, _np.ndarray], basis: Basis, error_tol:f I_cptplnd = _op.convert(I_ideal, 'CPTPLND', basis) povm_errormap = I_cptplnd.factorops[1] povm_cptp = _pv.ComposedPOVM(povm_errormap, base_povm) - inst_ops = dict() # Step 3. Assemble the CPTPLND-parameterized unitaries and POVM # effects to represent each operator as a completely-positive # trace-reducing map. # + inst_ops = dict() for lbl in op_arrays: cur_effects = effects[lbl] cur_unitaries = unitaries[lbl]
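For reference, the identity behind Step 1 of cptp_instrument in patch 09 — splitting a single Kraus operator K into a unitary factor and a "root conjugation" by p = (K†K)½ via a right polar decomposition — can be checked directly. This is only a sketch: the particular K and ρ below are arbitrary choices, and scipy.linalg.polar is used just as in the patch.

```python
import numpy as np
from scipy.linalg import polar

# One Kraus operator of a single-qubit amplitude-damping channel (arbitrary example).
g = 0.25
K = np.array([[0.0, np.sqrt(g)], [0.0, 0.0]])

# Right polar decomposition K = u @ p, with u unitary and p Hermitian PSD.
u, p = polar(K, side='right')
assert np.allclose(u @ p, K)
assert np.allclose(u @ u.conj().T, np.eye(2))

rho = np.array([[0.6, 0.1 + 0.2j], [0.1 - 0.2j, 0.4]])

# F(rho) = K rho K^dag equals u (p rho p) u^dag: conjugation by p = E^{1/2}
# (the RootConjOperator piece, with E = p @ p) followed by the unitary channel.
lhs = K @ rho @ K.conj().T
rhs = u @ (p @ rho @ p) @ u.conj().T
assert np.allclose(lhs, rhs)

# E = p @ p = K^dag K is the POVM effect whose probability this branch consumes.
assert np.allclose(p @ p, K.conj().T @ K)
```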