Stabilizer Purity
In [1]:
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
try:
    import numqi
except ImportError:
    %pip install numqi
    import numqi
For a pure state $|\psi\rangle$ of $n$ qubits, the stabilizer purity is
$$ P_{\alpha}\left(|\psi\rangle\right)=2^{-n}\sum_{x\in P_{n}}\langle\psi|x|\psi\rangle^{2\alpha} $$
where $P_{n}$ denotes the set of $4^{n}$ $n$-qubit Pauli strings. For a mixed state $\rho$, the convex roof extension is
$$ \begin{align*} P_{\alpha}(\rho)&=\sup_{\left\{ p_{s},|\psi_{s}\rangle\right\} }\sum_{s}p_{s}P_{\alpha}\left(|\psi_{s}\rangle\right)\\ &=\sup_{\left\{ |\tilde{\psi}_{s}\rangle\right\} }2^{-n}\sum_{s}p_{s}^{1-2\alpha}\sum_{x\in P_{n}}|\langle\tilde{\psi}_{s}|x|\tilde{\psi}_{s}\rangle|^{2\alpha} \end{align*} $$
where $|\tilde{\psi}_{s}\rangle=\sqrt{p_{s}}|\psi_{s}\rangle$ are sub-normalized states with $p_{s}=\langle\tilde{\psi}_{s}|\tilde{\psi}_{s}\rangle$. The linear stabilizer entropy is
$$ M_{\alpha}(\rho)=1-P_{\alpha}(\rho) $$
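For a single qubit the pure-state formula can be checked directly by brute force over the four Pauli matrices. Below is a minimal numpy-only sketch (independent of numqi; helper names are just for this example), evaluated on the H state used in the next cell:

import numpy as np

# the four single-qubit Pauli matrices I, X, Y, Z
pauli_list = [np.eye(2), np.array([[0,1],[1,0]]), np.array([[0,-1j],[1j,0]]), np.array([[1,0],[0,-1]])]

def stabilizer_purity_pure(psi, alpha):
    # P_alpha(|psi>) = 2^{-n} sum_{x in P_n} <psi|x|psi>^{2 alpha}, here n=1
    return sum(np.vdot(psi, x @ psi).real**(2*alpha) for x in pauli_list) / 2

psi_H = np.array([1, np.sqrt(2)-1]) / np.sqrt(4-2*np.sqrt(2)) # H state
for alpha in [2, 3, 4]:
    purity = stabilizer_purity_pure(psi_H, alpha)
    print(f'alpha={alpha}: P_alpha={purity:.6f}, M_alpha={1-purity:.6f}')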
H state
In [2]:
num_qubit = 1
alpha_list = [2,3,4]
prob_list = np.linspace(0, 1, 100)
psi = np.array([1, np.sqrt(2)-1]) / np.sqrt(4-2*np.sqrt(2)) # H state
# psi = numqi.random.rand_haar_state(2**num_qubit) # random state
dm_target = psi.reshape(-1,1) * psi.conj()
# mixing probability marked in the plot below: p = 0.5/||vec(rho)||_1 (1-norm of the Gell-Mann vector)
alpha_boundary = 0.5 / np.abs(numqi.gellmann.dm_to_gellmann_basis(dm_target)).sum()
ret_opt = []
for alpha_i in alpha_list:
    model = numqi.magic.MagicStabilizerEntropyModel(alpha_i, num_qubit, num_term=4*(2**num_qubit))
    for prob_i in tqdm(prob_list):
        # interpolate between the target state and the maximally mixed state: p*rho + (1-p)*I/d
        model.set_density_matrix(numqi.utils.hf_interpolate_dm(dm_target, alpha=prob_i))
        # the model loss is -P_alpha, so negate to recover the maximized stabilizer purity
        ret_opt.append(-numqi.optimize.minimize(model, 'uniform', num_repeat=10, tol=1e-10, print_every_round=0).fun)
ret_opt = np.array(ret_opt).reshape(len(alpha_list), -1)
In [3]:
fig,ax = plt.subplots()
ax.axvline(alpha_boundary, linestyle=':', color='red', label=r'$0.5\|\vec{\rho}\|_1^{-1}$')
for ind0 in range(len(alpha_list)):
    ax.plot(prob_list, 1-ret_opt[ind0], label=f'alpha={alpha_list[ind0]}')
ax.set_xlabel(r'$p\rho + (1-p)I/d$')
ax.set_ylabel('linear Stab Entropy')
ax.set_xlim(0, 1)
ax.legend()
ax.set_yscale('log')
fig.tight_layout()
CS state
WARNING: gradient descent converges poorly for this state (severe local minima), so the results below should be treated with caution.
In [4]:
alpha_list = [2]
CSstate = np.array([1, 1, 1, 1j], dtype=np.complex128) / 2
dm_target = CSstate.reshape(-1,1) * CSstate.conj()
num_qubit = numqi.utils.hf_num_state_to_num_qubit(dm_target.shape[0])
prob_list = np.linspace(0, 1, 50)
ret_opt = []
for alpha_i in alpha_list:
    model = numqi.magic.MagicStabilizerEntropyModel(alpha_i, num_qubit, num_term=2*(2**num_qubit))
    for prob_i in tqdm(prob_list):
        model.set_density_matrix(numqi.utils.hf_interpolate_dm(dm_target, alpha=prob_i))
        ret_opt.append(-numqi.optimize.minimize(model, 'uniform', num_repeat=3, tol=1e-10, print_every_round=0).fun) # severe local minima
        # ret_opt.append(-numqi.optimize.minimize_adam(model, num_step=5000, theta0='uniform', optim_args=('adam', 0.03,0.01), tqdm_update_freq=0))
ret_opt = np.array(ret_opt).reshape(len(alpha_list), -1)
In [5]:
fig,ax = plt.subplots()
ax.axvline(1/2, color='red', label='p=1/2')
for ind0 in range(len(alpha_list)):
    ax.plot(prob_list, 1-ret_opt[ind0], label=f'alpha={alpha_list[ind0]}')
ax.set_xlabel(r'$p\rho + (1-p)I/d$')
ax.set_ylabel('linear Stab Entropy')
ax.set_title('CS state (bad convergence)')
ax.legend()
ax.set_yscale('log')
fig.tight_layout()
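Since the convex roof of a pure state is the state itself, the $p=1$ endpoint of the curve above should match the pure-state formula. As a sanity check on the poorly converging optimization, here is a numpy-only brute-force evaluation over the 16 two-qubit Pauli strings (same definition as the sketch above; variable names are just for this example):

import itertools
import numpy as np

# single-qubit Pauli matrices I, X, Y, Z
pauli_list = [np.eye(2), np.array([[0,1],[1,0]]), np.array([[0,-1j],[1j,0]]), np.array([[1,0],[0,-1]])]

CSstate = np.array([1, 1, 1, 1j], dtype=np.complex128) / 2
alpha = 2
# P_alpha(|psi>) = 2^{-n} sum_{x in P_n} <psi|x|psi>^{2 alpha}, with n=2 and |P_n|=16
purity = 0.0
for op0, op1 in itertools.product(pauli_list, repeat=2):
    x = np.kron(op0, op1)
    purity += np.vdot(CSstate, x @ CSstate).real**(2*alpha)
purity /= 2**2
print(f'P_2(CS)={purity:.6f}, M_2(CS)={1-purity:.6f}')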