tools/metrics.py

class Fidelity
    Fidelity-like functionals for states and channels.

    default — Uhlmann fidelity between two states; for pure statevectors
    the squared overlap is used directly.
def default(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Uhlmann fidelity between two quantum states.

    Accepts statevectors (1-D) or density matrices (2-D). For two pure
    statevectors the squared overlap |<rho|sigma>|^2 is returned directly;
    otherwise both inputs are promoted to density matrices and
    F = (Tr sqrt(sqrt(rho) sigma sqrt(rho)))^2 is computed.
    """
    both_pure = rho.ndim == 1 and sigma.ndim == 1
    if both_pure:
        overlap = np.vdot(rho, sigma)
        return float(np.abs(overlap) ** 2)
    # Promote any remaining statevector to its projector.
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    if sigma.ndim == 1:
        sigma = np.outer(sigma, sigma.conj())
    root = fractional_matrix_power(rho, 0.5)
    core = root @ sigma @ root
    value = np.trace(fractional_matrix_power(core, 0.5)) ** 2
    return float(np.real(value))
def channel(kraus: List[Union[np.ndarray, List[float]]], rho: np.ndarray) -> np.ndarray:
    """Apply a quantum channel given in Kraus form.

    Computes rho -> sum_k K_k rho K_k^dagger; each Kraus operator may be
    any array-like and is coerced with np.asarray.
    """
    out = np.zeros_like(rho, dtype=np.complex128)
    for op in map(np.asarray, kraus):
        out = out + op @ rho @ op.conj().T
    return out
def entanglement(R_kraus: List[np.ndarray], E_kraus: List[np.ndarray], codes: List[np.ndarray]) -> float:
    """Entanglement fidelity of an encode -> noise -> recovery pipeline.

    A maximally entangled purification of the code space is built from the
    codewords, the noise channel ``E_kraus`` followed by the recovery
    channel ``R_kraus`` is applied (each operator extended by an identity
    on the reference system), and the overlap of the result with the
    original purification is returned.

    Parameters
    ----------
    R_kraus : Kraus operators of the recovery channel.
    E_kraus : Kraus operators of the noise channel.
    codes   : orthonormal codewords spanning the protected subspace.

    Returns
    -------
    float
        Entanglement fidelity in [0, 1].

    Fixes: result is now cast to a plain Python float (consistent with the
    other methods); the no-op ``.conj().T`` on 1-D vectors was removed; the
    opaque ``MD([a, b, c])`` chained-product helper was replaced by direct
    ``a @ b @ c`` (mathematically identical by associativity).
    """
    l = len(codes)
    ref = np.eye(l)
    # |QR> = (1/sqrt(l)) * sum_i |code_i> (x) |i>  -- purification of the code space.
    QR = sum(np.kron(codes[i], ref[i]) for i in range(l)) / np.sqrt(l)
    rho = np.outer(QR, QR.conj())
    # Extend channel operators with the identity on the reference system.
    Eks = [np.kron(Ek, ref) for Ek in E_kraus]
    Rks = [np.kron(Rk, ref) for Rk in R_kraus]
    rho_new = sum(Ek @ rho @ Ek.conj().T for Ek in Eks)
    rho_new = sum(Rk @ rho_new @ Rk.conj().T for Rk in Rks)
    # Renormalize in case the channels are trace-decreasing.
    rho_new /= np.trace(rho_new)
    fid = QR.conj() @ rho_new @ QR
    return float(np.abs(fid))
def cafaro(kraus_ops: List[np.ndarray]) -> float:
    """Cafaro-style entanglement-fidelity proxy for a channel.

    Computes sum_k |Tr K_k|^2 / N^2 for N x N Kraus operators; all
    operators must share the dimension of the first.
    """
    dim = kraus_ops[0].shape[0]
    for op in kraus_ops:
        assert op.shape == (dim, dim)
    total = 0.0
    for op in kraus_ops:
        total += np.abs(np.trace(op)) ** 2
    return total / dim ** 2
def negativity(rho: np.ndarray, dim_A: int, dim_B: int) -> float:
    """Negativity of a bipartite state: N = (||rho^{T_B}||_1 - 1) / 2.

    The partial transpose on subsystem B is taken by reshaping to a
    4-index tensor and swapping the B indices; the trace norm is the sum
    of singular values.
    """
    four_index = rho.reshape(dim_A, dim_B, dim_A, dim_B)
    partial_transpose = four_index.transpose(0, 3, 2, 1).reshape(dim_A * dim_B, dim_A * dim_B)
    trace_norm = np.linalg.svd(partial_transpose, compute_uv=False).sum()
    return float((trace_norm - 1) / 2)
def default(*args: Any) -> float:
    """Default entropy measure; an alias for the von Neumann entropy."""
    result = Entropy.neumann(*args)
    return float(result)
def tsallis(rho: np.ndarray, q: float = 2.0, base: float = 2.0) -> float:
    """Tsallis entropy S_q = (1 - sum_i lambda_i^q) / (q - 1).

    Falls back to the von Neumann entropy in the q -> 1 limit. Accepts a
    statevector (promoted to its projector) or a density matrix;
    eigenvalues below 1e-12 are discarded.
    """
    if q == 1:
        return float(Entropy.neumann(rho, base=base))
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    spectrum = np.linalg.eigvalsh(rho)
    spectrum = spectrum[spectrum > 1e-12]
    return float((1.0 - (spectrum ** q).sum()) / (q - 1))
def shannon(probs: np.ndarray, base: float = 2.0) -> float:
    """Shannon entropy H = -sum_i p_i log_base(p_i) over the support of probs."""
    support = probs[probs > 1e-12]
    return float(-(support * (np.log(support) / np.log(base))).sum())
def renyi(rho: np.ndarray, alpha: float = 2.0, base: float = 2.0) -> float:
    """Renyi entropy S_alpha = log(sum_i lambda_i^alpha) / (1 - alpha), in log base `base`.

    Falls back to the von Neumann entropy in the alpha -> 1 limit. Accepts
    a statevector (promoted to its projector) or a density matrix;
    eigenvalues below 1e-12 are discarded.
    """
    if alpha == 1:
        return float(Entropy.neumann(rho, base=base))
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    spectrum = np.linalg.eigvalsh(rho)
    spectrum = spectrum[spectrum > 1e-12]
    collision_sum = (spectrum ** alpha).sum()
    return float(np.log(collision_sum) / ((1 - alpha) * np.log(base)))
def hartley(probs: np.ndarray, base: float = 2.0) -> float:
    """Hartley (max-) entropy: log_base of the support size of probs."""
    support_size = int(np.count_nonzero(probs > 1e-12))
    return float(np.log(support_size) / np.log(base))
def neumann(rho: np.ndarray, base: float = 2.0) -> float:
    """von Neumann entropy S = -sum_i lambda_i log_base(lambda_i).

    Accepts a statevector (promoted to its projector) or a density matrix;
    eigenvalues below 1e-12 are discarded before taking logs.
    """
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    spectrum = np.linalg.eigvalsh(rho)
    spectrum = spectrum[spectrum > 1e-12]
    return float(-(spectrum * (np.log(spectrum) / np.log(base))).sum())
def unified(rho: np.ndarray, q: float = 2.0, alpha: float = 2.0, base: float = 2.0) -> float:
    """Unified (q, alpha) entropy of a state.

    Interpolates between the common entropies:
      * q -> 1 and alpha -> 1: von Neumann entropy (log base `base`),
      * q -> 1:                Renyi entropy of order alpha,
      * alpha -> 1:            Tsallis entropy of order q,
      * otherwise:             ((sum_i lambda_i^alpha)^((1-q)/(1-alpha)) - 1) / (1 - q).

    Accepts a statevector (promoted to its projector) or a density matrix.

    Bug fixed: the original q ~= 1 branch divided by (1 - alpha), which is
    zero when alpha ~= 1 as well, producing nan; that double limit is the
    von Neumann entropy and is now handled explicitly.
    """
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    eigenvalues = np.linalg.eigvalsh(rho)
    eigenvalues = eigenvalues[eigenvalues > 1e-12]
    q_is_one = abs(q - 1.0) < 1e-08
    alpha_is_one = abs(alpha - 1.0) < 1e-08
    if q_is_one and alpha_is_one:
        # Double limit: von Neumann entropy in the requested log base.
        return float(-np.sum(eigenvalues * np.log(eigenvalues) / np.log(base)))
    s = np.sum(eigenvalues ** alpha)
    if q_is_one:
        # Renyi entropy of order alpha.
        return float(np.log(s) / ((1 - alpha) * np.log(base)))
    if alpha_is_one:
        # Tsallis entropy of order q (natural-log convention, as elsewhere).
        return float((1 - np.sum(eigenvalues ** q)) / (q - 1))
    return float((s ** ((1 - q) / (1 - alpha)) - 1) / (1 - q))
def relative(rho: np.ndarray, sigma: np.ndarray, base: float = 2.0) -> float:
    """Quantum relative entropy S(rho || sigma) = Tr[rho (log rho - log sigma)].

    Statevector inputs are promoted to projectors. A small multiple of the
    identity is mixed in so both matrix logarithms stay finite.

    Bug fixed: the original regularized with in-place `+=`, silently
    mutating the caller's density matrices (and raising on integer-dtype
    input); the regularization is now applied to fresh arrays.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    eps = 1e-12
    # Out-of-place addition: never mutate the caller's arrays.
    rho = rho + eps * np.eye(rho.shape[0])
    sigma = sigma + eps * np.eye(sigma.shape[0])
    delta_log = np.asarray(logm(rho)) - np.asarray(logm(sigma))
    result = np.trace(rho @ delta_log).real
    return float(result / np.log(base))
def conditional(rho: np.ndarray, dA: int, dB: int) -> float:
    """Conditional entropy S(B|A) = S(AB) - S(A) of a bipartite state."""
    assert rho.shape == (dA * dB, dA * dB), 'Input must be a square matrix of shape (dA*dB, dA*dB)'
    rho_A = partial.trace(rho, dA, dB, keep='A')
    return Entropy.default(rho) - Entropy.default(rho_A)
def conditional(rho: np.ndarray, dA: int, dB: int, true_case: bool = True) -> float:
    """Conditional entropy of B given A, in one of two conventions.

    If ``true_case`` is True: measurement-induced conditional entropy
    sum_i p_i S(rho_B | outcome i) for a projective computational-basis
    measurement on subsystem A. Otherwise: the entropic difference
    S(AB) - S(A).
    """
    if not true_case:
        assert rho.shape == (dA * dB, dA * dB)
        rho_A = partial.trace(rho, dA, dB, keep='A')
        return Entropy.default(rho) - Entropy.default(rho_A)
    eye_B = np.eye(dB)
    total = 0
    for basis_vec in np.eye(dA):
        # Projector onto this measurement outcome, extended to the full space.
        Pi = np.kron(np.outer(basis_vec, basis_vec), eye_B)
        prob = np.trace(Pi @ rho)
        if prob > 1e-12:
            post_state = Pi @ rho @ Pi / prob
            rho_B = partial.trace(post_state, dA, dB, keep='B')
            total += prob * Entropy.default(rho_B)
    return total
def mutual(rho: np.ndarray, dA: int, dB: int) -> float:
    """Quantum mutual information I(A:B) = S(A) + S(B) - S(AB)."""
    assert rho.shape == (dA * dB, dA * dB)
    marginal_A = partial.trace(rho, dA, dB, keep='A')
    marginal_B = partial.trace(rho, dA, dB, keep='B')
    return Entropy.default(marginal_A) + Entropy.default(marginal_B) - Entropy.default(rho)
def coherent(rho_AB: np.ndarray, dA: int, dB: int) -> float:
    """Coherent information I_c = S(B) - S(AB) of a bipartite state."""
    assert rho_AB.shape == (dA * dB, dA * dB), f'expected rho ({dA * dB}, {dA * dB}), got {rho_AB.shape}'
    marginal_B = partial.trace(rho_AB, dA, dB, keep='B')
    return Entropy.default(marginal_B) - Entropy.default(rho_AB)
def relative_entropy(rho: np.ndarray, sigma: np.ndarray, base: float = 2.0) -> float:
    """Relative entropy distance S(rho || sigma) = Tr[rho (log rho - log sigma)].

    Statevector inputs are promoted to projectors. A small multiple of the
    identity is mixed in so both matrix logarithms stay finite.

    Bug fixed: the original regularized with in-place `+=`, silently
    mutating the caller's density matrices (and raising on integer-dtype
    input); the regularization is now applied to fresh arrays.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    eps = 1e-12
    # Out-of-place addition: never mutate the caller's arrays.
    rho = rho + eps * np.eye(rho.shape[0])
    sigma = sigma + eps * np.eye(sigma.shape[0])
    delta_log = np.asarray(logm(rho)) - np.asarray(logm(sigma))
    result = np.trace(rho @ delta_log).real
    return float(result / np.log(base))
def bures(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Bures distance D_B = sqrt(2 - 2 sqrt(F(rho, sigma))).

    Statevector inputs are promoted to projectors before computing the
    Uhlmann fidelity.
    """
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    if sigma.ndim == 1:
        sigma = np.outer(sigma, sigma.conj())
    root_fidelity = Fidelity.default(rho, sigma) ** 0.5
    return float(np.sqrt(2 - 2 * root_fidelity))
def jensen_shannon(rho: np.ndarray, sigma: np.ndarray, base: float = 2.0) -> float:
    """Quantum Jensen-Shannon divergence (symmetrized relative entropy).

    Uses the midpoint state m = (rho + sigma) / 2 and returns
    0.5 * D(rho || m) + 0.5 * D(sigma || m).
    """
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    if sigma.ndim == 1:
        sigma = np.outer(sigma, sigma.conj())
    midpoint = 0.5 * (rho + sigma)
    left = Distance.relative_entropy(rho, midpoint, base)
    right = Distance.relative_entropy(sigma, midpoint, base)
    return 0.5 * (left + right)
def trace_distance(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Trace distance T(rho, sigma) = 0.5 * ||rho - sigma||_1.

    Statevector inputs are promoted to projectors; the trace norm is the
    sum of the singular values of rho - sigma.

    Bug fixed: the original applied np.trace to the 1-D vector returned by
    svdvals, which raises ValueError (trace needs a >= 2-D array); the
    trace norm is the SUM of the singular values. The result is also cast
    to a plain float for consistency with the other methods.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    singular_values = svdvals(rho - sigma)
    return float(0.5 * np.sum(singular_values))