Skip to content

tools/metrics.py

class Fidelity

Fidelity-like functionals for states and channels.

default

Uhlmann fidelity F(ρ,σ).

For pure statevectors $|\psi\rangle, |\phi\rangle$: $F = |\langle\psi|\phi\rangle|^2$. For density matrices: $F = \left(\operatorname{Tr}\sqrt{\sqrt{\rho}\,\sigma\,\sqrt{\rho}}\right)^2$.

py
def default(rho: np.ndarray, sigma: np.ndarray) -> float [static]
Implementation
python
def default(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Uhlmann fidelity between two states (statevectors or density matrices)."""
    # Fast path: two pure statevectors -> |<psi|phi>|^2.
    if rho.ndim == 1 and sigma.ndim == 1:
        overlap = np.vdot(rho, sigma)
        return float(np.abs(overlap) ** 2)
    # Promote any remaining statevector to its projector.
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    # F = (Tr sqrt( sqrt(rho) sigma sqrt(rho) ))^2
    root = fractional_matrix_power(rho, 0.5)
    sandwich = root @ sigma @ root
    value = np.trace(fractional_matrix_power(sandwich, 0.5)) ** 2
    return float(np.real(value))

channel

Apply a quantum channel given by Kraus operators.

Computes $\mathcal{E}(\rho) = \sum_k K_k \rho K_k^\dagger$.

py
def channel(kraus: List[Union[np.ndarray, List[float]]], rho: np.ndarray) -> np.ndarray [static]
Implementation
python
def channel(kraus: List[Union[np.ndarray, List[float]]], rho: np.ndarray) -> np.ndarray:
    """Apply the quantum channel E(rho) = sum_k K_k rho K_k^dagger given its Kraus operators."""
    out = np.zeros_like(rho, dtype=np.complex128)
    for op in map(np.asarray, kraus):
        out = out + op @ rho @ op.conj().T
    return out

entanglement

Entanglement fidelity for an encode-noise-recovery pipeline.

Builds a purification $|QR\rangle$ from codes, applies noise $\mathcal{E}$ and recovery $\mathcal{R}$ via Kraus sets, and returns $\langle QR|\rho|QR\rangle$.

py
def entanglement(R_kraus: List[np.ndarray], E_kraus: List[np.ndarray], codes: List[np.ndarray]) -> float [static]
Implementation
python
def entanglement(R_kraus: List[np.ndarray], E_kraus: List[np.ndarray], codes: List[np.ndarray]) -> float:
    """Entanglement fidelity <QR|rho'|QR> for an encode -> noise -> recovery pipeline."""
    dim = len(codes)
    ident = np.eye(dim)
    # Purification: |QR> = (1/sqrt(dim)) * sum_i |code_i> (x) |i>.
    QR = 1 / np.sqrt(dim) * sum(np.kron(codes[i], ident[i]) for i in range(dim))
    rho = np.outer(QR, QR.conj().T)
    # Extend each Kraus operator with the identity on the reference system.
    noise_ops = [np.kron(E, ident) for E in E_kraus]
    recovery_ops = [np.kron(Rk, ident) for Rk in R_kraus]
    rho_out = sum(MD([E, rho, E.conj().T]) for E in noise_ops)
    rho_out = sum(MD([Rk, rho_out, Rk.conj().T]) for Rk in recovery_ops)
    # Renormalise after the (possibly trace-decreasing) maps.
    rho_out /= np.trace(rho_out)
    overlap = np.dot(QR.conj().T, np.dot(rho_out, QR))
    return np.abs(overlap)

cafaro

Cafaro-style entanglement fidelity proxy for a channel.

Uses $F_e = \sum_k |\operatorname{Tr}(K_k)|^2 / N^2$ for $N \times N$ Kraus operators.

py
def cafaro(kraus_ops: List[np.ndarray]) -> float [static]
Implementation
python
def cafaro(kraus_ops: List[np.ndarray]) -> float:
    """Cafaro-style entanglement-fidelity proxy F_e = sum_k |Tr(K_k)|^2 / N^2.

    Parameters
    ----------
    kraus_ops : non-empty list of N x N Kraus operators.

    Returns
    -------
    The entanglement-fidelity proxy as a Python float.

    Raises
    ------
    ValueError : if the list is empty or any operator is not N x N.
    """
    # Validate explicitly instead of `assert`, which is stripped under -O,
    # and guard the empty list (the original raised IndexError on it).
    if not kraus_ops:
        raise ValueError('kraus_ops must contain at least one operator')
    N = kraus_ops[0].shape[0]
    for K in kraus_ops:
        if K.shape != (N, N):
            raise ValueError(f'expected Kraus operator of shape {(N, N)}, got {K.shape}')
    F_e = sum((np.abs(np.trace(K)) ** 2 for K in kraus_ops))
    return float(F_e / N ** 2)

negativity

Negativity of a bipartite state via the partial transpose.

Computes $N(\rho) = \frac{\lVert \rho^{T_B} \rVert_1 - 1}{2}$ using the trace norm.

py
def negativity(rho: np.ndarray, dim_A: int, dim_B: int) -> float [static]
Implementation
python
def negativity(rho: np.ndarray, dim_A: int, dim_B: int) -> float:
    """Negativity N(rho) = (||rho^{T_B}||_1 - 1) / 2 of a bipartite density matrix."""
    d = dim_A * dim_B
    # Partial transpose on subsystem B: swap the two B indices of the 4-index tensor.
    pt = (
        rho.reshape(dim_A, dim_B, dim_A, dim_B)
        .transpose(0, 3, 2, 1)
        .reshape(d, d)
    )
    # The trace norm equals the sum of singular values.
    trace_norm = np.linalg.svd(pt, compute_uv=False).sum()
    return float((trace_norm - 1) / 2)

class Entropy

Common entropy functionals for probability vectors and density matrices.

default

Default entropy (alias for von Neumann entropy).

py
def default() -> float [static]
Implementation
python
def default(*args: Any) -> float:
    """Default entropy functional: thin alias that forwards all positional
    arguments to ``Entropy.neumann`` (von Neumann entropy)."""
    return float(Entropy.neumann(*args))

tsallis

Tsallis entropy Sq for a state (typically density matrix).

For eigenvalues $\{\lambda_i\}$: $S_q = \frac{1 - \sum_i \lambda_i^q}{q - 1}$.

py
def tsallis(rho: np.ndarray, q: float, base: float) -> float [static]
Implementation
python
def tsallis(rho: np.ndarray, q: float=2.0, base: float=2.0) -> float:
    """Tsallis entropy S_q = (1 - sum_i lambda_i^q) / (q - 1) of a state."""
    # The q -> 1 limit is the von Neumann entropy.
    if q == 1:
        return float(Entropy.neumann(rho, base=base))
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    lam = np.linalg.eigvalsh(rho)
    lam = lam[lam > 1e-12]  # drop numerically-zero eigenvalues
    return float((1 - np.sum(lam ** q)) / (q - 1))

shannon

Shannon entropy $H(p) = -\sum_i p_i \log p_i$ for a probability vector.

py
def shannon(probs: np.ndarray, base: float) -> float [static]
Implementation
python
def shannon(probs: np.ndarray, base: float=2.0) -> float:
    """Shannon entropy H(p) = -sum_i p_i log_base(p_i) of a probability vector."""
    support = probs[probs > 1e-12]  # 0 log 0 := 0
    return float(-(support * np.log(support) / np.log(base)).sum())

renyi

Renyi entropy Sα for a density matrix.

For eigenvalues $\{\lambda_i\}$: $S_\alpha = \frac{1}{1 - \alpha} \log \sum_i \lambda_i^\alpha$.

py
def renyi(rho: np.ndarray, alpha: float, base: float) -> float [static]
Implementation
python
def renyi(rho: np.ndarray, alpha: float=2.0, base: float=2.0) -> float:
    """Renyi entropy S_alpha = log(sum_i lambda_i^alpha) / (1 - alpha), log base `base`."""
    # The alpha -> 1 limit is the von Neumann entropy.
    if alpha == 1:
        return float(Entropy.neumann(rho, base=base))
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    lam = np.linalg.eigvalsh(rho)
    lam = lam[lam > 1e-12]  # keep only the numerical support
    power_sum = (lam ** alpha).sum()
    return float(np.log(power_sum) / ((1 - alpha) * np.log(base)))

hartley

Hartley entropy $H_0 = \log |\operatorname{supp}(p)|$ for a probability vector.

py
def hartley(probs: np.ndarray, base: float) -> float [static]
Implementation
python
def hartley(probs: np.ndarray, base: float=2.0) -> float:
    """Hartley (max-) entropy H_0 = log_base |supp(p)| of a probability vector."""
    support = np.count_nonzero(probs > 1e-12)  # entries above numerical zero
    return float(np.log(support) / np.log(base))

neumann

von Neumann entropy $S(\rho) = -\operatorname{Tr}(\rho \log \rho)$.

py
def neumann(rho: np.ndarray, base: float) -> float [static]
Implementation
python
def neumann(rho: np.ndarray, base: float=2.0) -> float:
    """von Neumann entropy S(rho) = -Tr(rho log rho), in units of log `base`."""
    if rho.ndim == 1:
        # A statevector is promoted to its rank-1 projector.
        rho = np.outer(rho, rho.conj())
    lam = np.linalg.eigvalsh(rho)
    lam = lam[lam > 1e-12]  # 0 log 0 := 0
    return float(-(lam * np.log(lam) / np.log(base)).sum())

unified

Unified (q,α)-entropy family (interpolates Tsallis/Renyi cases).

py
def unified(rho: np.ndarray, q: float, alpha: float, base: float) -> float [static]
Implementation
python
def unified(rho: np.ndarray, q: float=2.0, alpha: float=2.0, base: float=2.0) -> float:
    """Unified (q, alpha)-entropy family.

    Limits: q -> 1 gives the Renyi entropy S_alpha; alpha -> 1 gives the
    Tsallis entropy S_q; q -> 1 and alpha -> 1 together give the von
    Neumann entropy.

    Parameters
    ----------
    rho   : statevector (promoted to a projector) or density matrix.
    q     : order of the outer deformation.
    alpha : order of the eigenvalue power sum.
    base  : logarithm base used in the q -> 1 limits.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    eigenvalues = np.linalg.eigvalsh(rho)
    eigenvalues = eigenvalues[eigenvalues > 1e-12]
    s = np.sum(eigenvalues ** alpha)
    q_is_one = abs(q - 1.0) < 1e-08
    alpha_is_one = abs(alpha - 1.0) < 1e-08
    if q_is_one and alpha_is_one:
        # BUG FIX: the original fell into the Renyi branch here and divided
        # by (1 - alpha) ~ 0, producing nan.  The joint limit is von Neumann.
        return float(-np.sum(eigenvalues * np.log(eigenvalues)) / np.log(base))
    if q_is_one:
        # q -> 1: Renyi entropy of order alpha.
        return float(np.log(s) / ((1 - alpha) * np.log(base)))
    if alpha_is_one:
        # alpha -> 1: Tsallis entropy of order q.
        return float((1 - np.sum(eigenvalues ** q)) / (q - 1))
    return float((s ** ((1 - q) / (1 - alpha)) - 1) / (1 - q))

relative

Quantum relative entropy $D(\rho \| \sigma) = \operatorname{Tr}[\rho(\log\rho - \log\sigma)]$.

py
def relative(rho: np.ndarray, sigma: np.ndarray, base: float) -> float [static]
Implementation
python
def relative(rho: np.ndarray, sigma: np.ndarray, base: float=2.0) -> float:
    """Quantum relative entropy D(rho || sigma) = Tr[rho (log rho - log sigma)].

    Accepts statevectors (promoted to projectors) or density matrices; the
    result is expressed in units of log `base`.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    # Regularise with a small multiple of the identity so logm is defined on
    # rank-deficient states.  BUG FIX: use out-of-place addition; `+=` mutated
    # the caller's array in place (and failed on integer-dtype input).
    eps = 1e-12
    rho = rho + eps * np.eye(rho.shape[0])
    sigma = sigma + eps * np.eye(sigma.shape[0])
    log_rho = np.asarray(logm(rho))
    log_sigma = np.asarray(logm(sigma))
    delta_log = log_rho - log_sigma
    result = np.trace(rho @ delta_log).real
    return float(result / np.log(base))

conditional

Conditional entropy $S(AB) - S(A)$ (i.e. $S(B|A)$ in the standard convention) for a bipartite state.

py
def conditional(rho: np.ndarray, dA: int, dB: int) -> float [static]
Implementation
python
def conditional(rho: np.ndarray, dA: int, dB: int) -> float:
    """Conditional entropy S(AB) - S(A) of a bipartite state on A (x) B.

    NOTE(review): under the standard naming convention this quantity is
    S(B|A), since it conditions on subsystem A.
    """
    assert rho.shape == (dA * dB, dA * dB), 'Input must be a square matrix of shape (dA*dB, dA*dB)'
    joint_entropy = Entropy.default(rho)
    marginal_A = partial.trace(rho, dA, dB, keep='A')
    return joint_entropy - Entropy.default(marginal_A)

class Info

Information-theoretic quantities derived from entropies.

conditional

Conditional entropy (two conventions; controlled by true_case).

  • If true_case=True, computes the measurement-induced conditional entropy on B given a computational basis projective measurement on A.
  • Otherwise returns $S(AB) - S(A)$.
py
def conditional(rho: np.ndarray, dA: int, dB: int, true_case: bool) -> float [static]
Implementation
python
def conditional(rho: np.ndarray, dA: int, dB: int, true_case: bool=True) -> float:
    """Conditional entropy of a bipartite state, two conventions.

    true_case=True : average entropy of B after a projective measurement of
                     subsystem A in the computational basis.
    true_case=False: the entropy difference S(AB) - S(A).
    """
    if not true_case:
        assert rho.shape == (dA * dB, dA * dB)
        marginal_A = partial.trace(rho, dA, dB, keep='A')
        return Entropy.default(rho) - Entropy.default(marginal_A)
    eye_B = np.eye(dB)
    total = 0
    for basis_vec in np.eye(dA):
        # Projector |i><i| on A, extended with the identity on B.
        proj = np.kron(np.outer(basis_vec, basis_vec), eye_B)
        p = np.trace(proj @ rho)
        if p > 1e-12:  # skip outcomes with (numerically) zero probability
            post_state = proj @ rho @ proj / p
            reduced_B = partial.trace(post_state, dA, dB, keep='B')
            total += p * Entropy.default(reduced_B)
    return total

mutual

Quantum mutual information $I(A{:}B) = S(A) + S(B) - S(AB)$.

py
def mutual(rho: np.ndarray, dA: int, dB: int) -> float [static]
Implementation
python
def mutual(rho: np.ndarray, dA: int, dB: int) -> float:
    """Quantum mutual information I(A:B) = S(A) + S(B) - S(AB)."""
    assert rho.shape == (dA * dB, dA * dB)
    # Entropies of the two reduced states.
    marginal_entropies = [
        Entropy.default(partial.trace(rho, dA, dB, keep=side))
        for side in ('A', 'B')
    ]
    return sum(marginal_entropies) - Entropy.default(rho)

coherent

Coherent information $I_c(A \rangle B) = S(B) - S(AB)$.

py
def coherent(rho_AB: np.ndarray, dA: int, dB: int) -> float [static]
Implementation
python
def coherent(rho_AB: np.ndarray, dA: int, dB: int) -> float:
    """Coherent information I_c(A>B) = S(B) - S(AB) of a bipartite state."""
    assert rho_AB.shape == (dA * dB, dA * dB), f'expected rho ({dA * dB}, {dA * dB}), got {rho_AB.shape}'
    reduced_B = partial.trace(rho_AB, dA, dB, keep='B')
    return Entropy.default(reduced_B) - Entropy.default(rho_AB)

class Distance

Distances/divergences between quantum states.

relative_entropy

Relative entropy distance D(ρσ) (same as Entropy.relative).

py
def relative_entropy(rho: np.ndarray, sigma: np.ndarray, base: float) -> float [static]
Implementation
python
def relative_entropy(rho: np.ndarray, sigma: np.ndarray, base: float=2.0) -> float:
    """Relative entropy distance D(rho || sigma) = Tr[rho (log rho - log sigma)].

    Same functional as Entropy.relative; statevector inputs are promoted to
    projectors and the result is in units of log `base`.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    # Regularise so logm is defined on rank-deficient states.  BUG FIX: use
    # out-of-place addition; `+=` mutated the caller's array in place (and
    # failed on integer-dtype input).
    eps = 1e-12
    rho = rho + eps * np.eye(rho.shape[0])
    sigma = sigma + eps * np.eye(sigma.shape[0])
    log_rho = np.asarray(logm(rho))
    log_sigma = np.asarray(logm(sigma))
    delta_log = log_rho - log_sigma
    result = np.trace(rho @ delta_log).real
    return float(result / np.log(base))

bures

Bures distance $D_B(\rho,\sigma) = \sqrt{2 - 2\sqrt{F(\rho,\sigma)}}$.

py
def bures(rho: np.ndarray, sigma: np.ndarray) -> float [static]
Implementation
python
def bures(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Bures distance D_B(rho, sigma) = sqrt(2 - 2 sqrt(F(rho, sigma)))."""
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    if sigma.ndim == 1:
        sigma = np.outer(sigma, sigma.conj())
    root_fidelity = Fidelity.default(rho, sigma) ** 0.5
    return float(np.sqrt(2 - 2 * root_fidelity))

jensen_shannon

Quantum Jensen-Shannon divergence (symmetric, smoothed version of relative entropy).

Uses $\tfrac{1}{2}\left(D(\rho\|m) + D(\sigma\|m)\right)$ with $m = (\rho + \sigma)/2$.

py
def jensen_shannon(rho: np.ndarray, sigma: np.ndarray, base: float) -> float [static]
Implementation
python
def jensen_shannon(rho: np.ndarray, sigma: np.ndarray, base: float=2.0) -> float:
    """Quantum Jensen-Shannon divergence 0.5*(D(rho||m) + D(sigma||m)), m = (rho+sigma)/2."""
    if rho.ndim == 1:
        rho = np.outer(rho, rho.conj())
    if sigma.ndim == 1:
        sigma = np.outer(sigma, sigma.conj())
    midpoint = 0.5 * (rho + sigma)
    left = Distance.relative_entropy(rho, midpoint, base)
    right = Distance.relative_entropy(sigma, midpoint, base)
    return 0.5 * (left + right)

trace_distance

Trace distance $\tfrac{1}{2}\lVert \rho - \sigma \rVert_1$ via singular values.

py
def trace_distance(rho: np.ndarray, sigma: np.ndarray) -> float [static]
Implementation
python
def trace_distance(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Trace distance T(rho, sigma) = 0.5 * ||rho - sigma||_1.

    Statevector inputs are promoted to density matrices; the trace norm is
    computed as the sum of singular values of rho - sigma.
    """
    rho = np.outer(rho, rho.conj()) if rho.ndim == 1 else rho
    sigma = np.outer(sigma, sigma.conj()) if sigma.ndim == 1 else sigma
    # BUG FIX: svdvals returns a 1-D array, and np.trace on a 1-D array
    # raises ValueError.  The trace norm is the *sum* of singular values.
    return float(0.5 * np.sum(svdvals(rho - sigma)))