circuit/gates.py
class Operator
Generic gate class wrapping a complex matrix/tensor with a name and optional parameters/metadata.
- tensor product via '^'
- matrix composition via '@'.
| name | type | default |
|---|---|---|
| name | str | None |
| params | List[Any] | None |
| tensor | torch.Tensor | None |
init
No Definition provided
def __init__(self: Any, tensor: torch.Tensor, name: str, params: Optional[List[Any]]) -> AnyImplementation
def __init__(self, tensor: torch.Tensor, name: str, params: Optional[List[Any]]=None):
    """Store the wrapped matrix/tensor with a display name and optional parameter list."""
    self.tensor = tensor
    self.name = name
    # Copy the incoming list so later caller-side mutation cannot leak in.
    self.params = [] if params is None else list(params)
Parameterized unitary acting on a subset of wires, embedded into the full Hilbert space. A generic container class which handles all the reshaping/permuting logic to apply a target-space unitary to the correct subset of wires in a larger system.
The full dense matrix can be materialized if needed, but the main use is to apply the unitary to states or compose with other gates without ever explicitly constructing the full matrix. The target-space matrix is stored as a parameter and can be optimized over if desired.
| name | type | default |
|---|---|---|
| U | torch.Tensor | None |
| all | List[int] | None |
| device | str | None |
| dims | List[int] | None |
| index | List[int] | None |
| inv_perm | List[int] | None |
| name | str | None |
| params | List[Any] | None |
| perm | List[int] | None |
| rest_size | int | None |
| target_dims | List[int] | None |
| target_size | int | None |
| total_dim | int | None |
| unused | List[int] | None |
| wires | int | None |
init
No Definition provided
def __init__(self: Any, matrix: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str, name: str, params: Optional[list]) -> AnyImplementation
def __init__(self, matrix: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str='cpu', name: str='U', params: Optional[list]=None):
    """Embed a target-space unitary acting on the wires in `index` into a `wires`-subsystem register."""
    super().__init__()
    self.device = device
    self.wires = wires
    # Normalise scalar arguments to list form.
    self.index = [index] if not isinstance(index, list) else index
    self.dims = [dim] * wires if isinstance(dim, int) else dim
    self.name = name
    self.params = [] if params is None else list(params)
    # Sizes of the full Hilbert space and of the targeted subsystems.
    self.total_dim = math.prod(self.dims)
    self.target_dims = [self.dims[w] for w in self.index]
    self.target_size = math.prod(self.target_dims)
    self.U = tensorise(matrix, device=device)
    if self.U.shape != (self.target_size, self.target_size):
        raise ValueError(f'Matrix shape {self.U.shape} does not match target size {(self.target_size, self.target_size)}.')
    # Wire bookkeeping: targets first, then untouched wires, plus the inverse permutation.
    self.all = list(range(self.wires))
    self.unused = [w for w in self.all if w not in self.index]
    self.perm = self.index + self.unused
    self.inv_perm = list(np.argsort(self.perm))
    self.rest_size = self.total_dim // self.target_size
Apply the embedded unitary to a statevector:
def forward(self: Any, x: torch.Tensor) -> torch.TensorImplementation
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Apply the embedded unitary to a statevector; result is a (total_dim, 1) column."""
    return self._left(x).view(self.total_dim, 1)
Apply the unitary channel to a density matrix:
def forwardd(self: Any, rho: torch.Tensor) -> torch.TensorImplementation
def forwardd(self, rho: torch.Tensor) -> torch.Tensor:
    """Apply the unitary channel U rho U† to a density matrix."""
    # Left-multiply by U, then left-multiply the adjoint to realise the right factor U†.
    left_applied = self._left(rho)
    return self._left(left_applied.conj().T).conj().T
Materialize the full dense matrix by applying the module to computational basis vectors.
def matrix(self: Any) -> torch.TensorImplementation
def matrix(self) -> torch.Tensor:
    """Materialize the full dense matrix column-by-column from computational basis vectors."""
    basis = torch.eye(self.total_dim, device=self.device, dtype=C64)
    columns = [self.forward(basis[i]) for i in range(self.total_dim)]
    return torch.cat(columns, dim=1)
Convenience factory for common qudit gates at a fixed local dimension.
| name | type | default |
|---|---|---|
| device | str | None |
| dim | int | None |
init
No Definition provided
def __init__(self: Any, dim: int, device: str) -> AnyImplementation
def __init__(self, dim: int=2, device: str='cpu'):
    """Remember the local qudit dimension and target device for generated gates."""
    self.device = device
    self.dim = dim
Identity gate on a single qudit: the d-by-d identity matrix, leaving any state unchanged.
def I(self: Any) -> OperatorImplementation
def I(self) -> Operator:
    """Identity gate on a single qudit."""
    eye = torch.eye(self.dim, dtype=C64, device=self.device)
    return Operator(eye, 'I')
Hadamard/DFT gate (the 2x2 Hadamard for d=2, the discrete Fourier transform matrix for d>2).
def H(self: Any) -> OperatorImplementation
def H(self) -> Operator:
    """Hadamard for d=2; the discrete Fourier transform matrix for d>2."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[1, 1], [1, -1]], dtype=C64, device=self.device) / math.sqrt(2)
    else:
        # DFT entries w^{jk} / sqrt(d) with w = exp(2*pi*i/d).
        omega = cmath.exp(2j * math.pi / d)
        levels = torch.arange(d, device=self.device)
        m = omega ** (levels[:, None] * levels[None, :]) / math.sqrt(d)
    return Operator(m.to(dtype=C64), 'H')
Generalized X (cyclic shift) gate, shifting each computational level by one (modulo d).
def X(self: Any) -> OperatorImplementation
def X(self) -> Operator:
    """Generalized X: cyclic shift of computational levels (Pauli-X when d=2)."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[0, 1], [1, 0]], dtype=C64, device=self.device)
    else:
        # Rolling the identity's columns left by one produces the shift matrix.
        m = torch.roll(torch.eye(d, dtype=C64, device=self.device), shifts=-1, dims=1)
    return Operator(m, 'X')
Generalized Z (phase) gate:
def Z(self: Any) -> OperatorImplementation
def Z(self) -> Operator:
    """Generalized Z: diagonal phase gate diag(1, w, ..., w^{d-1}), w = exp(2*pi*i/d)."""
    d = self.dim
    if d == 2:
        return Operator(torch.tensor([[1, 0], [0, -1]], dtype=C64, device=self.device), 'Z')
    omega = cmath.exp(2j * math.pi / d)
    phases = omega ** torch.arange(d, device=self.device)
    return Operator(torch.diag(phases), 'Z')
Generalized Y gate (up to a global phase), built as ZX/i from the Z and X generators (exact Pauli-Y for d=2).
def Y(self: Any) -> OperatorImplementation
def Y(self) -> Operator:
    """Generalized Y built from Z and X (up to a global phase); exact Pauli-Y for d=2."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[0, -1j], [1j, 0]], dtype=C64, device=self.device)
    else:
        # Y = Z X / i generalization for qudits.
        # NOTE(review): self.Z / self.X are accessed without calling them, which only
        # works if Z and X are properties — decorators are not visible here; confirm.
        m = torch.matmul(self.Z.tensor, self.X.tensor) / 1j
    return Operator(m, 'Y')
Return True if kwargs contain circuit-embedding keys (index/wires/dim).
def inCircuit(self: Any, kwargs: dict) -> boolImplementation
def inCircuit(self, kwargs: dict) -> bool:
    """Return True when kwargs carry every circuit-embedding key: index, wires and dim."""
    required = ('index', 'wires', 'dim')
    return all(key in kwargs for key in required)
Generalized rotation from a Gell-Mann generator (symmetric/asymmetric/diagonal). Gell-Mann gates are described by their type (sym/asym/diag) and the indices j, k specifying the generator.
We generate each of them as
def GMR(self: Any, j: int, k: int, angle: Any, type: str) -> Union[Operator, Gate]Implementation
def GMR(self, j: int, k: int, angle: Any, type: str='asym', *, matrix: bool=False, **kwargs: Any) -> Union[Operator, Gate]:
    """Rotation generated by a Gell-Mann generator (symmetric/asymmetric/diagonal).

    Args:
        j, k: level indices selecting the generator.
        angle: rotation angle (number or tensor; a grad-tracking tensor yields a VarGate).
        type: 'sym', 'asym' or 'diag'.
        matrix: when True, return a standalone Operator instead of an embedded Gate.
        **kwargs: circuit-embedding keys index/wires/dim (plus optional name).

    Returns:
        Operator when matrix=True (or circuit kwargs are absent), else Gate/VarGate.

    Raises:
        ValueError: for 'sym' with j == k, or an unknown type.
        TypeError: unreachable in practice given the fallback below (kept as a guard).
    """
    # Without placement kwargs, fall back to producing a standalone Operator.
    if not self.inCircuit(kwargs):
        matrix = True
    # Keep the caller's original object so gradient tracking can be detected later.
    orig_angle = angle
    if not isinstance(angle, torch.Tensor):
        angle = torch.tensor(angle, dtype=C64, device=self.device)
    else:
        angle = angle.to(device=self.device)
    # Map (type, j, k) onto gell_mann's index convention:
    # first < second -> symmetric, first > second -> antisymmetric, equal -> diagonal.
    if type == 'sym':
        idx1, idx2 = (min(j, k), max(j, k))
        if idx1 == idx2:
            raise ValueError('Symmetric requires distinct j, k')
    elif type == 'asym':
        idx1, idx2 = (max(j, k), min(j, k))
    elif type == 'diag':
        idx1, idx2 = (j, j)
    else:
        raise ValueError('type must be sym, asym, or diag')
    gen = gell_mann(idx1, idx2, self.dim, device=self.device)
    if type in ['sym', 'asym']:
        # Off-diagonal generators admit a closed form: exp(-i*theta/2*G) is the
        # identity with a 2x2 rotation block, written in place (no matrix_exp needed).
        m = torch.eye(self.dim, dtype=C64, device=self.device)
        c = torch.cos(angle / 2).to(dtype=C64)
        s = torch.sin(angle / 2).to(dtype=C64)
        a, b = (min(j, k), max(j, k))
        m[a, a] = c
        m[b, b] = c
        if type == 'sym':
            m[a, b] = -1j * s
            m[b, a] = -1j * s
        else:
            m[a, b] = -s
            m[b, a] = s
        gate_name = f'GMR_{type}'
    else:
        # Diagonal generator: no simple block form, use the matrix exponential.
        ang = angle.to(dtype=C64)
        m = torch.matrix_exp(-1j * (ang / 2) * gen)
        gate_name = f'GMR_{type}'
    gate_params: List[Tuple[str, Any]] = [('type', type), ('j', j), ('k', k), ('angle', angle), ('dim', self.dim)]
    if matrix:
        return Operator(m, gate_name, params=gate_params)
    if not self.inCircuit(kwargs):
        raise TypeError(f'{gate_name} missing circuit kwargs (index/wires/dim). Call with matrix=True for standalone Operator.')
    index = kwargs.pop('index')
    wires = kwargs.pop('wires')
    dim = kwargs.pop('dim')
    name = kwargs.pop('name', None)
    # A live-gradient angle needs a VarGate that rebuilds the unitary on each forward;
    # the in-place construction above would otherwise freeze a snapshot of the matrix.
    needs_grad = isinstance(orig_angle, nn.Parameter) or (isinstance(orig_angle, torch.Tensor) and orig_angle.requires_grad)
    if needs_grad:
        build_fn = _gmr_factory(j, k, type, self.dim, self.device)
        return VarGate(build_fn, orig_angle, index=index, wires=wires, dim=dim, device=self.device, name=name or gate_name, params=gate_params)
    return Gate(m, index=index, wires=wires, dim=dim, device=self.device, name=name or gate_name, params=gate_params)
Rotation in the (0,1) symmetric subspace (qubit-like Rx when d=2) as
def RX(self: Any, angle: Any) -> Union[Operator, Gate]Implementation
def RX(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Operator, Gate]:
    """Rotation from the symmetric (0,1) Gell-Mann generator; qubit-like Rx when d=2."""
    return self.GMR(0, 1, angle, 'sym', matrix=matrix, **kwargs)
Rotation in the (0,1) asymmetric subspace (qubit-like Ry when d=2) as
def RY(self: Any, angle: Any) -> Union[Operator, Gate]Implementation
def RY(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Operator, Gate]:
    """Rotation from the asymmetric (0,1) Gell-Mann generator; qubit-like Ry when d=2."""
    return self.GMR(0, 1, angle, 'asym', matrix=matrix, **kwargs)
Diagonal generator rotation (qubit-like Rz when d=2) as
def RZ(self: Any, angle: Any) -> Union[Operator, Gate]Implementation
def RZ(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Operator, Gate]:
    """Rotation from the first diagonal Gell-Mann generator; qubit-like Rz when d=2."""
    return self.GMR(0, 0, angle, 'diag', matrix=matrix, **kwargs)
Controlled-unitary: block-diagonal gate that applies the k-th power of the target unitary when the control qudit is in computational state |k>.
def CU(self: Any, U_target: Any) -> Union[Operator, Gate]Implementation
def CU(self, U_target: Any=None, *, matrix: bool=False, **kwargs: Any) -> Union[Operator, Gate]:
    """Controlled-unitary for qudits: block-diagonal gate applying U_target^k when the control is |k>.

    Args:
        U_target: (d, d) matrix or Operator acting on the target qudit.
        matrix: when True, return a standalone Operator instead of an embedded Gate.
        **kwargs: circuit-embedding keys index/wires/dim (plus optional name).

    Returns:
        Operator when matrix=True (or circuit kwargs are absent), otherwise a Gate.

    Raises:
        ValueError: if U_target is not a (d, d) matrix.
    """
    # Consistency fix: mirror GMR's fallback — without circuit kwargs, return a
    # standalone Operator instead of failing with a raw KeyError on kwargs.pop.
    if not self.inCircuit(kwargs):
        matrix = True
    d = self.dim
    U_mat = tensorise(U_target.tensor if isinstance(U_target, Operator) else U_target, device=self.device)
    if U_mat.shape != (d, d):
        raise ValueError(f'U_target must be a ({d},{d}) matrix, got {U_mat.shape}.')
    I = torch.eye(d, device=self.device, dtype=C64)
    # Block k holds U^k, so the full gate is sum_k |k><k| (x) U^k.
    blocks = [I]
    for k in range(1, d):
        blocks.append(blocks[-1] @ U_mat)
    m = torch.block_diag(*blocks)
    gate_name = 'CU'
    target_name = U_target.name if isinstance(U_target, Operator) else None
    gate_params: List[Tuple[str, Any]] = [('target', target_name), ('dim', d)]
    if matrix:
        return Operator(m, gate_name, params=gate_params)
    index = kwargs.pop('index')
    wires = kwargs.pop('wires')
    dim = kwargs.pop('dim')
    name = kwargs.pop('name', None)
    return Gate(m, index=index, wires=wires, dim=dim, device=self.device, name=name or gate_name, params=gate_params)
Controlled-X (generalized CNOT) as a standalone dense gate as
def CX(self: Any) -> OperatorImplementation
def CX(self) -> Operator:
    """Generalized CNOT: controlled cyclic shift, returned as a standalone dense Operator."""
    shift = self.X
    return self.CU(shift, matrix=True)
SWAP gate exchanging two d-dimensional subsystems, mapping the product basis state |i>|j> to |j>|i>.
def SWAP(self: Any) -> OperatorImplementation
def SWAP(self) -> Operator:
    """SWAP exchanging two d-dimensional subsystems: |a>|b> <-> |b>|a>."""
    d = self.dim
    m = torch.zeros((d * d, d * d), dtype=C64, device=self.device)
    for a in range(d):
        for b in range(d):
            # Entry (b*d + a, a*d + b) maps basis state |a>|b> onto |b>|a>.
            m[b * d + a, a * d + b] = 1.0
    return Operator(m, 'SWAP')
No Definition provided
def U(self: Any, matrix: Any) -> Callable[[Any, int, Any], Gate]Implementation
def U(self, matrix: Any, **kwargs: Any) -> Callable[[Any, int, Any], Gate]:
    """Wrap an arbitrary matrix into a deferred Gate factory; the circuit later supplies dim/wires/index."""
    tensor = tensorise(matrix, device=self.device)
    label = kwargs.get('name') or 'U'
    def factory(dim: Any, wires: int, index: Any, **inner: Any) -> Gate:
        # Build the embedded gate once placement information is known.
        return Gate(tensor, index, wires, dim, device=self.device, name=label, params=inner.get('params'))
    # Expose the chosen label on the factory so callers can inspect it like a gate.
    factory.name = label
    return factory
A parametric gate that recomputes its unitary from a live nn.Parameter angle on every forward pass, enabling correct gradient flow through the angle parameter.
Used by Gategen.GMR (and RX/RY/RZ) when the supplied angle has requires_grad. The matrix is never snapshotted: each _left() call invokes _build_fn(self.angle).
init
No Definition provided
def __init__(self: Any, build_fn: 'Callable[[torch.Tensor], torch.Tensor]', angle: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str, name: str, params: Optional[list]) -> NoneImplementation
def __init__(self, build_fn: 'Callable[[torch.Tensor], torch.Tensor]', angle: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str='cpu', name: str='VarGate', params: Optional[list]=None) -> None:
    """Gate whose unitary is rebuilt from a live angle on each use.

    build_fn maps a complex scalar angle tensor to the target-space unitary.
    """
    # Detached float snapshot of the angle, used only to build an initial matrix
    # for the parent constructor — gradients never flow through this copy.
    if isinstance(angle, torch.Tensor):
        angle_val = angle.detach().float().to(device)
    else:
        angle_val = torch.tensor(float(angle), dtype=torch.float32, device=device)
    with torch.no_grad():
        init_U = build_fn(angle_val.to(dtype=C64))
    super().__init__(matrix=init_U, index=index, wires=wires, dim=dim, device=device, name=name, params=params)
    self._build_fn = build_fn
    if isinstance(angle, nn.Parameter):
        # NOTE(review): object.__setattr__ bypasses nn.Module's parameter
        # registration — presumably so a caller-owned nn.Parameter is referenced
        # without being re-registered under this module; confirm intent.
        object.__setattr__(self, 'angle', angle)
    else:
        self.angle = nn.Parameter(angle_val)
Per-wire noise channel with pre-baked Kraus ops. Samples one branch stochastically.
init
No Definition provided
def __init__(self: Any, K: torch.Tensor, index: Any, wires: int, dims: Any, generator: Any) -> NoneImplementation
def __init__(self, K: torch.Tensor, index: Any, wires: int, dims: Any, generator=None) -> None:
    """Noise channel on a wire subset; K stacks the Kraus operators along dim 0."""
    targets = index if isinstance(index, list) else [index]
    all_dims = [dims] * wires if isinstance(dims, int) else dims
    local_dim = math.prod([all_dims[i] for i in targets])
    # The zero placeholder matrix only drives the parent's shape bookkeeping;
    # the channel itself acts through the Kraus stack.
    super().__init__(matrix=torch.zeros(local_dim, local_dim, dtype=C64), index=targets, wires=wires, dim=all_dims, name='NoisyGate')
    self.register_buffer('_K', K.to(dtype=C64))
    self.generator = generator
No Definition provided
def forwardd(self: Any, rho: torch.Tensor) -> torch.TensorImplementation
def forwardd(self, rho: torch.Tensor) -> torch.Tensor:
    """Apply the Kraus channel to a density matrix by sampling a single branch.

    Each branch is K_k rho K_k†; one branch is drawn with probability tr(branch)
    and renormalized by that probability (stochastic unravelling of the channel).
    """
    branches = []
    for k in range(self._K.shape[0]):
        # K_k rho, then the same left-multiplication on the adjoint yields K_k rho K_k†.
        rho_k = self._left(rho, self._K[k])
        branches.append(self._left(rho_k.conj().T, self._K[k]).conj().T)
    # Branch weights are the traces; clamp guards tiny negative numerical noise.
    probs = torch.stack([torch.trace(b).real for b in branches]).clamp(min=0)
    probs = probs / probs.sum()
    ki = torch.multinomial(probs, 1, generator=self.generator).item()
    return branches[ki] / probs[ki]
No Definition provided
def forward(self: Any, x: torch.Tensor) -> torch.TensorImplementation
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Statevector evolution is undefined for a Kraus channel; use forwardd() on a density matrix."""
    raise NotImplementedError('NoisyGate only supports forwardd()')
Stochastic quantum jump gate for Mode.TRAJECTORY. Operates on statevectors.
init
No Definition provided
def __init__(self: Any, K: torch.Tensor, index: Any, wires: int, dims: Any, generator: Any) -> NoneImplementation
def __init__(self, K: torch.Tensor, index: Any, wires: int, dims: Any, generator=None) -> None:
    """Quantum-jump gate on a wire subset; K stacks the Kraus operators along dim 0."""
    targets = index if isinstance(index, list) else [index]
    all_dims = [dims] * wires if isinstance(dims, int) else dims
    local_dim = math.prod([all_dims[i] for i in targets])
    # The zero placeholder matrix only drives the parent's shape bookkeeping.
    super().__init__(matrix=torch.zeros(local_dim, local_dim, dtype=C64), index=targets, wires=wires, dim=all_dims, name='TrajectoryGate')
    self.register_buffer('_K', K.to(dtype=C64))
    self.generator = generator
No Definition provided
def forward(self: Any, psi: torch.Tensor) -> torch.TensorImplementation
def forward(self, psi: torch.Tensor) -> torch.Tensor:
    """Sample one quantum jump: branch K_k|psi> is picked with probability ||K_k psi||^2, then renormalized."""
    flat = psi.view(self.total_dim)
    branches = [self._left(flat, self._K[k]) for k in range(self._K.shape[0])]
    # Jump probabilities from squared branch norms; clamp guards numerical noise.
    weights = torch.stack([b.norm() ** 2 for b in branches]).real.clamp(min=0)
    probs = weights / weights.sum()
    choice = torch.multinomial(probs, 1, generator=self.generator).item()
    chosen = branches[choice]
    return (chosen / chosen.norm()).view(self.total_dim, 1)
No Definition provided
def forwardd(self: Any, rho: torch.Tensor) -> torch.TensorImplementation
def forwardd(self, rho: torch.Tensor) -> torch.Tensor:
    """Density matrices are not supported; trajectories act on statevectors via forward()."""
    raise NotImplementedError('TrajectoryGate only supports forward()')
Convert common array-likes (Tensor/ndarray/list/Operator) into a torch complex tensor.
def tensorise(m: Any, device: str, dtype: torch.dtype) -> torch.TensorImplementation
def tensorise(m: Any, device: str='cpu', dtype: torch.dtype=C64) -> torch.Tensor:
    """Convert a Tensor / ndarray / list / Operator into a torch tensor of the given dtype.

    Args:
        m: array-like to convert.
        device: target device for the resulting tensor.
        dtype: target dtype (complex by default).

    Returns:
        A torch.Tensor on `device` with dtype `dtype`.

    Raises:
        TypeError: for any other input type.
    """
    if isinstance(m, torch.Tensor):
        return m.to(device=device, dtype=dtype)
    elif isinstance(m, np.ndarray):
        return torch.from_numpy(m).to(device, non_blocking=True).type(dtype)
    elif isinstance(m, list):
        return torch.tensor(m, device=device, dtype=dtype)
    elif isinstance(m, Operator):
        return m.tensor.to(device=device, dtype=dtype)
    else:
        # Fix: the old message omitted Operator even though the branch above accepts it.
        raise TypeError(f'Unsupported type: {type(m)}. Expected Tensor, ndarray, list, or Operator.')
Return a generalized Gell-Mann generator
def gell_mann(j: int, k: int, d: int, device: str) -> torch.TensorImplementation
def gell_mann(j: int, k: int, d: int, device: str='cpu') -> torch.Tensor:
    """Return a generalized Gell-Mann generator in dimension d.

    j < k selects the symmetric generator, j > k the antisymmetric one, and
    j == k the diagonal generator for level l = j + 1 (identity once l >= d).
    """
    m = torch.zeros((d, d), dtype=C64, device=device)
    if j == k:
        l = j + 1
        if l >= d:
            # Beyond the last diagonal generator only the identity remains.
            return torch.eye(d, dtype=C64, device=device)
        scale = math.sqrt(2 / (l * (l + 1)))
        for i in range(l):
            m[i, i] = scale
        m[l, l] = -l * scale
    elif j < k:
        m[j, k] = 1.0
        m[k, j] = 1.0
    else:
        m[k, j] = -1j
        m[j, k] = 1j
    return m