circuit/gates.py
class Gate
Generic gate class wrapping a complex matrix/tensor with a name and optional parameters/metadata.
- tensor product via '^'
- matrix composition via '@'.
| name | type | default |
|---|---|---|
| name | str | None |
| params | List[Any] | None |
| tensor | torch.Tensor | None |
init
No Definition provided
def __init__(self: Any, tensor: torch.Tensor, name: str, params: Optional[List[Any]]) -> AnyImplementation
def __init__(self, tensor: torch.Tensor, name: str, params: Optional[List[Any]]=None):
    """Store the gate's matrix/tensor, its display name, and optional parameters.

    Args:
        tensor: complex matrix/tensor representing the gate.
        name: human-readable gate label.
        params: optional parameter list; copied so callers cannot mutate
            the stored list through their own reference.
    """
    self.tensor = tensor
    self.name = name
    self.params = [] if params is None else list(params)
Parameterized unitary acting on a subset of wires, embedded into the full Hilbert space. A generic container class which handles all the reshaping/permuting logic to apply a target-space unitary to the correct subset of wires in a larger system.
The full dense matrix can be materialized if needed, but the main use is to apply the unitary to states or compose with other gates without ever explicitly constructing the full matrix. The target-space matrix is stored as a parameter and can be optimized over if desired.
| name | type | default |
|---|---|---|
| U | torch.Tensor | None |
| all | List[int] | None |
| device | str | None |
| dims | List[int] | None |
| index | List[int] | None |
| inv_perm | List[int] | None |
| name | str | None |
| params | List[Any] | None |
| perm | List[int] | None |
| rest_size | int | None |
| target_dims | List[int] | None |
| target_size | int | None |
| total_dim | int | None |
| unused | List[int] | None |
| wires | int | None |
init
No Definition provided
def __init__(self: Any, matrix: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str, name: str, params: Optional[list]) -> AnyImplementation
def __init__(self, matrix: Any, index: List[int], wires: int, dim: Union[int, List[int]], device: str='cpu', name: str='U', params: Optional[list]=None):
    """Embed a target-space unitary acting on `index` wires of a `wires`-wire system.

    Args:
        matrix: target-space matrix (anything `tensorise` accepts).
        index: wire index or list of wire indices the unitary acts on.
        wires: total number of wires in the system.
        dim: shared local dimension (int) or per-wire dimensions (list).
        device: torch device string.
        name: gate label.
        params: optional parameter list (copied).

    Raises:
        ValueError: on mismatched dims length, duplicate/out-of-range wire
            indices, or a matrix whose shape does not match the target size.
    """
    super().__init__()
    self.device = device
    self.wires = wires
    self.index = index if isinstance(index, list) else [index]
    self.dims = [dim] * wires if isinstance(dim, int) else dim
    # Validate early: a wrong dims list or bad wire index would otherwise
    # surface later as a confusing reshape/permute error in forward().
    if len(self.dims) != wires:
        raise ValueError(f'Expected {wires} local dimensions, got {len(self.dims)}.')
    if len(set(self.index)) != len(self.index):
        raise ValueError(f'Duplicate wire indices in {self.index}.')
    if any(i < 0 or i >= wires for i in self.index):
        raise ValueError(f'Wire indices {self.index} out of range for {wires} wires.')
    self.name = name
    self.params = list(params) if params is not None else []
    self.total_dim = int(np.prod(self.dims))
    self.target_dims = [self.dims[i] for i in self.index]
    self.target_size = int(np.prod(self.target_dims))
    self.U = tensorise(matrix, device=device)
    if self.U.shape != (self.target_size, self.target_size):
        raise ValueError(f'Matrix shape {self.U.shape} does not match target size {(self.target_size, self.target_size)}.')
    # Permutation that brings the target wires to the front, and its inverse
    # to restore the original wire order after applying U.
    self.all = list(range(self.wires))
    self.unused = [i for i in self.all if i not in self.index]
    self.perm = self.index + self.unused
    self.inv_perm = [self.perm.index(i) for i in range(self.wires)]
    self.rest_size = self.total_dim // self.target_size
Apply the embedded unitary to a statevector, reshaping and permuting wires as needed.
def forward(self: Any, x: torch.Tensor) -> torch.TensorImplementation
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Apply the embedded unitary to a statevector.

    Reshapes `x` into the per-wire tensor, moves the target wires to the
    front, applies U on the flattened target subspace, then restores the
    original wire order. Returns a column vector of shape (total_dim, 1).
    """
    state = x.view(*self.dims).permute(*self.perm)
    state = state.reshape(self.target_size, self.rest_size)
    state = self.U @ state
    permuted_dims = [self.dims[w] for w in self.perm]
    state = state.view(*permuted_dims).permute(*self.inv_perm).contiguous()
    return state.view(self.total_dim, 1)
Apply the unitary channel to a density matrix: $\rho \mapsto U \rho U^\dagger$.
def forwardd(self: Any, rho: torch.Tensor) -> torch.TensorImplementation
def forwardd(self, rho: torch.Tensor) -> torch.Tensor:
    """Apply the unitary channel to a density matrix: rho -> U rho U^dagger."""
    full = self.matrix()
    return full @ rho @ full.conj().T
Materialize the full dense matrix by applying the module to computational basis vectors.
def matrix(self: Any) -> torch.TensorImplementation
def matrix(self) -> torch.Tensor:
    """Materialize the full dense matrix.

    Applies the module to each computational basis vector and stacks the
    resulting columns; avoids ever building the embedded matrix directly.
    """
    basis = torch.eye(self.total_dim, device=self.device, dtype=C64)
    columns = [self.forward(basis[i]) for i in range(self.total_dim)]
    return torch.cat(columns, dim=1)
Convenience factory for common qudit gates at a fixed local dimension.
| name | type | default |
|---|---|---|
| device | str | None |
| dim | int | None |
init
No Definition provided
def __init__(self: Any, dim: int, device: str) -> AnyImplementation
def __init__(self, dim: int=2, device: str='cpu'):
    """Create a gate factory for qudits of local dimension `dim` on `device`."""
    self.dim = dim
    self.device = device
Wrap a target-space matrix as an embedded Unitary acting on given wire indices.
def asU(self: Any, m: torch.Tensor, index: Union[int, List[int]], wires: int, dim: Union[int, List[int]], name: Optional[str], params: Optional[list]) -> UnitaryImplementation
def asU(self, m: torch.Tensor, index: Union[int, List[int]], wires: int, dim: Union[int, List[int]], name: Optional[str]=None, params: Optional[list]=None) -> Unitary:
    """Wrap a target-space matrix as a Unitary embedded on the given wires."""
    label = name or 'U'
    return Unitary(m, index=index, wires=wires, dim=dim, device=self.device, name=label, params=params)
Identity gate on one qudit.
def I(self: Any) -> GateImplementation
def I(self) -> Gate:
    """Identity gate on a single qudit."""
    eye = torch.eye(self.dim, dtype=C64, device=self.device)
    return Gate(eye, 'I')
Hadamard/DFT gate (H for d=2, the discrete Fourier transform matrix for d > 2).
def H(self: Any) -> GateImplementation
def H(self) -> Gate:
    """Hadamard for d=2; the discrete Fourier transform matrix for d > 2."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[1, 1], [1, -1]], dtype=C64, device=self.device) / np.sqrt(2)
    else:
        # F[j, k] = omega^(j*k) / sqrt(d), with omega the d-th root of unity.
        omega = np.exp(2j * torch.pi / d)
        levels = torch.arange(d, device=self.device)
        m = omega ** torch.outer(levels, levels) / np.sqrt(d)
    return Gate(m.to(dtype=C64), 'H')
Generalized X (cyclic shift) gate.
def X(self: Any) -> GateImplementation
def X(self) -> Gate:
    """Generalized X gate: cyclic-shift permutation matrix (Pauli-X for d=2)."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[0, 1], [1, 0]], dtype=C64, device=self.device)
    else:
        # Roll the identity's columns to obtain the cyclic shift matrix.
        m = torch.roll(torch.eye(d, dtype=C64, device=self.device), shifts=-1, dims=1)
    return Gate(m, 'X')
Generalized Z (phase) gate: $\mathrm{diag}(\omega^k)$ with $\omega = e^{2\pi i/d}$.
def Z(self: Any) -> GateImplementation
def Z(self) -> Gate:
    """Generalized Z (phase) gate: diag(omega^k) with omega = exp(2*pi*i/d).

    Returns:
        Gate holding the diagonal phase matrix (Pauli-Z for d=2).
    """
    d = self.dim
    if d == 2:
        m = torch.tensor([[1, 0], [0, -1]], dtype=C64, device=self.device)
    else:
        w = np.exp(2j * torch.pi / d)
        idx = torch.arange(d, device=self.device)
        # Cast to the module's complex dtype: `w ** idx` produces complex128,
        # which was inconsistent with H/X (both store C64).
        m = torch.diag(w ** idx).to(dtype=C64)
    return Gate(m, 'Z')
Generalized Y (up to phase), built from Z and X (Pauli-Y for d=2) as
def Y(self: Any) -> GateImplementation
def Y(self) -> Gate:
    """Generalized Y (up to phase), built as Z @ X / i (exact Pauli-Y for d=2)."""
    d = self.dim
    if d == 2:
        m = torch.tensor([[0, -1j], [1j, 0]], dtype=C64, device=self.device)
    else:
        # Bug fix: Z and X are methods, so they must be CALLED to obtain
        # Gate objects; `self.Z.tensor` accessed `.tensor` on a bound method.
        m = torch.matmul(self.Z().tensor, self.X().tensor) / 1j
    return Gate(m, 'Y')
Return True if kwargs contain circuit-embedding keys (index/wires/dim).
def inCircuit(self: Any, kwargs: dict) -> boolImplementation
def inCircuit(self, kwargs: dict) -> bool:
    """Return True when kwargs carry every circuit-embedding key (index, wires, dim)."""
    required = ('index', 'wires', 'dim')
    return all(key in kwargs for key in required)
Generalized rotation from a Gell-Mann generator (symmetric/asymmetric/diagonal). Gell-Mann gates are described by their type (sym/asym/diag) and the indices j, k specifying the generator.
Each rotation is generated as $\exp(-i\,\theta/2\,\lambda)$, where $\lambda$ is the selected Gell-Mann generator (the sym/asym cases are built in closed form on the affected two-level subspace).
def GMR(self: Any, j: int, k: int, angle: Any, type: str) -> Union[Gate, Unitary]Implementation
def GMR(self, j: int, k: int, angle: Any, type: str='asym', *, matrix: bool=False, **kwargs: Any) -> Union[Gate, Unitary]:
    """Generalized rotation exp(-i*angle/2 * lambda) for a Gell-Mann generator.

    Args:
        j, k: generator indices (for 'diag', k is ignored and the level-j
            diagonal generator is used).
        angle: rotation angle (scalar or tensor).
        type: 'sym', 'asym', or 'diag'.
        matrix: when True, return a standalone dense Gate. Forced True when
            circuit kwargs (index/wires/dim) are absent.
        **kwargs: circuit-embedding keys (index, wires, dim, optional name).

    Returns:
        Gate (standalone) or Unitary (embedded in a circuit).

    Raises:
        ValueError: on an unknown `type`, or 'sym' with j == k.
    """
    # Without circuit kwargs there is nothing to embed into: fall back to a
    # standalone dense Gate (mirrors the behavior callers rely on for RX/RY/RZ).
    if not self.inCircuit(kwargs):
        matrix = True
    if not isinstance(angle, torch.Tensor):
        angle = torch.tensor(angle, dtype=C64, device=self.device)
    else:
        angle = angle.to(device=self.device)
    if type not in ('sym', 'asym', 'diag'):
        raise ValueError('type must be sym, asym, or diag')
    if type == 'sym' and j == k:
        raise ValueError('Symmetric requires distinct j, k')
    if type in ('sym', 'asym'):
        # The rotation only mixes levels (a, b); build the closed form
        # directly instead of exponentiating the full generator. (The
        # original also computed the generator here but never used it.)
        m = torch.eye(self.dim, dtype=C64, device=self.device)
        c = torch.cos(angle / 2).to(dtype=C64)
        s = torch.sin(angle / 2).to(dtype=C64)
        a, b = (min(j, k), max(j, k))
        m[a, a] = c
        m[b, b] = c
        if type == 'sym':
            m[a, b] = -1j * s
            m[b, a] = -1j * s
        else:
            m[a, b] = -s
            m[b, a] = s
    else:
        # Diagonal generator: exponentiate exactly.
        gen = gell_mann(j, j, self.dim, device=self.device)
        ang = angle.to(dtype=C64)
        m = torch.matrix_exp(-1j * (ang / 2) * gen)
    gate_name = f'GMR_{type}'
    gate_params: List[Tuple[str, Any]] = [('type', type), ('j', j), ('k', k), ('angle', angle), ('dim', self.dim)]
    if matrix:
        return Gate(m, gate_name, params=gate_params)
    index = kwargs.pop('index')
    wires = kwargs.pop('wires')
    dim = kwargs.pop('dim')
    name = kwargs.pop('name', None)
    return self.asU(m, index=index, wires=wires, dim=dim, name=name or gate_name, params=gate_params)
Rotation in the (0,1) symmetric subspace (qubit-like Rx when d=2) as
def RX(self: Any, angle: Any) -> Union[Gate, Unitary]Implementation
def RX(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Gate, Unitary]:
    """Rotation in the (0, 1) symmetric subspace (qubit-like Rx for d=2)."""
    return self.GMR(0, 1, angle, type='sym', matrix=matrix, **kwargs)
Rotation in the (0,1) asymmetric subspace (qubit-like Ry when d=2) as
def RY(self: Any, angle: Any) -> Union[Gate, Unitary]Implementation
def RY(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Gate, Unitary]:
    """Rotation in the (0, 1) asymmetric subspace (qubit-like Ry for d=2)."""
    return self.GMR(0, 1, angle, type='asym', matrix=matrix, **kwargs)
Diagonal generator rotation (qubit-like Rz when d=2) as
def RZ(self: Any, angle: Any) -> Union[Gate, Unitary]Implementation
def RZ(self, angle: Any, *, matrix: bool=False, **kwargs: Any) -> Union[Gate, Unitary]:
    """Diagonal-generator rotation (qubit-like Rz for d=2)."""
    return self.GMR(0, 0, angle, type='diag', matrix=matrix, **kwargs)
Controlled-unitary: applies the k-th power of the target unitary when the control qudit is in computational state $|k\rangle$.
def CU(self: Any, U_target: Any) -> Union[Gate, Unitary]Implementation
def CU(self, U_target: Any=None, *, matrix: bool=False, **kwargs: Any) -> Union[Gate, Unitary]:
    """Controlled-unitary: applies U^k to the target when the control is |k>.

    Args:
        U_target: (d, d) target unitary — a Gate or anything `tensorise` accepts.
        matrix: when True, return a standalone dense Gate. Forced True when
            circuit kwargs (index/wires/dim) are absent.
        **kwargs: circuit-embedding keys (index, wires, dim, optional name).

    Returns:
        Gate (standalone) or Unitary (embedded in a circuit).

    Raises:
        ValueError: if U_target is not a (d, d) matrix.
    """
    # Consistency fix: like GMR, fall back to a standalone Gate when circuit
    # kwargs are missing instead of crashing on kwargs.pop('index') below.
    if not self.inCircuit(kwargs):
        matrix = True
    d = self.dim
    U_mat = tensorise(U_target.tensor if isinstance(U_target, Gate) else U_target, device=self.device)
    if U_mat.shape != (d, d):
        raise ValueError(f'U_target must be a ({d},{d}) matrix, got {U_mat.shape}.')
    I = torch.eye(d, device=self.device, dtype=C64)
    # Block k of the block-diagonal result is U^k: identity for control |0>,
    # U for |1>, U @ U for |2>, and so on.
    blocks = [I]
    for k in range(1, d):
        blocks.append(blocks[-1] @ U_mat)
    m = torch.block_diag(*blocks)
    gate_name = 'CU'
    target_name = U_target.name if isinstance(U_target, Gate) else None
    gate_params: List[Tuple[str, Any]] = [('target', target_name), ('dim', d)]
    if matrix:
        return Gate(m, gate_name, params=gate_params)
    index = kwargs.pop('index')
    wires = kwargs.pop('wires')
    dim = kwargs.pop('dim')
    name = kwargs.pop('name', None)
    return self.asU(m, index=index, wires=wires, dim=dim, name=name or gate_name, params=gate_params)
Controlled-X (generalized CNOT) as a standalone dense gate.
def CX(self: Any) -> GateImplementation
def CX(self) -> Gate:
    """Controlled-X (generalized CNOT) as a standalone dense two-qudit Gate."""
    # Bug fix: X is a method — the original passed the bound method object
    # itself, which tensorise() rejects with TypeError. Call it to get a Gate.
    return self.CU(self.X(), matrix=True)
SWAP gate exchanging two d-dimensional subsystems.
def SWAP(self: Any) -> GateImplementation
def SWAP(self) -> Gate:
    """SWAP gate exchanging two d-dimensional subsystems: |i, j> -> |j, i>."""
    d = self.dim
    m = torch.zeros((d * d, d * d), dtype=C64, device=self.device)
    # Column i*d + j maps to row j*d + i; set all entries in one shot.
    src = torch.arange(d * d, device=self.device)
    dst = (src % d) * d + src // d
    m[dst, src] = 1.0
    return Gate(m, 'SWAP')
No Definition provided
def U(self: Any, matrix: Any) -> Callable[[Any, int, Any], Unitary]Implementation
def U(self, matrix: Any, **kwargs: Any) -> Callable[[Any, int, Any], Unitary]:
    """Return a named factory that builds an embedded Unitary from `matrix`.

    The matrix is tensorised once up front; the returned callable takes the
    circuit placement (dim, wires, index) and produces the Unitary.
    """
    tensor = tensorise(matrix, device=self.device)
    label = kwargs.get('name') or 'U'

    def factory(dim: Any, wires: int, index: Any, **inner: Any) -> Unitary:
        # Placement-time construction of the embedded unitary.
        return Unitary(tensor, index, wires, dim, device=self.device, name=label, params=inner.get('params'))

    factory.name = label
    return factory
Convert common array-likes (Tensor/ndarray/list/Gate) into a torch complex tensor.
def tensorise(m: Any, device: str, dtype: torch.dtype) -> torch.TensorImplementation
def tensorise(m: Any, device: str='cpu', dtype: torch.dtype=C64) -> torch.Tensor:
    """Convert a Tensor/ndarray/list/Gate into a torch tensor of `dtype` on `device`.

    Args:
        m: input array-like or Gate.
        device: torch device string.
        dtype: target dtype (complex by default).

    Raises:
        TypeError: for any unsupported input type.
    """
    if isinstance(m, torch.Tensor):
        return m.to(device=device, dtype=dtype)
    elif isinstance(m, np.ndarray):
        return torch.from_numpy(m).to(device, non_blocking=True).type(dtype)
    elif isinstance(m, list):
        return torch.tensor(m, device=device, dtype=dtype)
    elif isinstance(m, Gate):
        return m.tensor.to(device=device, dtype=dtype)
    else:
        # Bug fix: the message previously omitted Gate even though Gate
        # inputs are accepted above.
        raise TypeError(f'Unsupported type: {type(m)}. Expected Tensor, ndarray, list, or Gate.')
Return a (generalized) Gell-Mann generator
def gell_mann(j: int, k: int, d: int, device: str) -> torch.TensorImplementation
def gell_mann(j: int, k: int, d: int, device: str='cpu') -> torch.Tensor:
    """Return a (generalized) Gell-Mann generator of dimension d.

    j < k: symmetric generator (1 at both off-diagonal positions).
    j > k: antisymmetric generator (+i / -i off-diagonal).
    j == k: diagonal generator for level j + 1; degenerates to the identity
        when j + 1 >= d.
    """
    m = torch.zeros((d, d), dtype=C64, device=device)
    if j == k:
        level = j + 1
        if level >= d:
            # Out-of-range diagonal index: fall back to the identity.
            return torch.eye(d, dtype=C64, device=device)
        scale = np.sqrt(2 / (level * (level + 1)))
        for row in range(level):
            m[row, row] = scale
        m[level, level] = -level * scale
    elif j < k:
        m[j, k] = 1.0
        m[k, j] = 1.0
    else:
        m[k, j] = torch.tensor(-1j, dtype=C64, device=device)
        m[j, k] = torch.tensor(1j, dtype=C64, device=device)
    return m