Linear entropy of entanglement¶
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
try:
import numqi
except ImportError:
%pip install numqi
import numqi
For pure state $|\psi\rangle$
$$ S_{l}(\rho)=1-\mathrm{Tr}[\rho^{2}] $$
$$ E_{l}\left(|\psi\rangle\right)=S_{l}\left(\mathrm{Tr}_{B}\left[|\psi\rangle\langle\psi|\right]\right) $$
For mixed state $\rho$ (convex roof extension)
$$ \begin{align*} E_{l}(\rho)&=\min_{\left\{ p_{i},|\psi_{i}\rangle\right\} }\sum_{i}p_{i}E_{l}\left(|\psi_{i}\rangle\right)\\ &=1-\max_{\left\{ |\tilde{\psi}_{i}\rangle\right\} }\sum_{i}\frac{\mathrm{Tr}\left[\tilde{\psi}_{i}\tilde{\psi}_{i}^{\dagger}\tilde{\psi}_{i}\tilde{\psi}_{i}^{\dagger}\right]}{\mathrm{Tr}\left[\tilde{\psi}_{i}\tilde{\psi}_{i}^{\dagger}\right]} \end{align*} $$
Below, the LEE is calculated for some two-qubit states via a semi-definite programming (SDP) method (see the paper referenced in the original notebook; the DOI link was lost in export).
# Evaluate the PPT-based SDP bound on the linear entropy of entanglement (LEE)
# for several two-qubit states: it vanishes for product/separable inputs and
# is largest for the maximally entangled state.
dim_pair = (2, 2)

# product state rho_A (x) rho_B of two random single-qubit density matrices
rho_a = numqi.random.rand_density_matrix(2)
rho_b = numqi.random.rand_density_matrix(2)
rho = np.kron(rho_a, rho_b)
print('LEE for a random product state:', numqi.entangle.get_linear_entropy_entanglement_ppt(rho, dim_pair))

# random separable state (convex mixture of product states)
rho = numqi.random.rand_separable_dm(2, 2)
print('LEE for a random separable state:', numqi.entangle.get_linear_entropy_entanglement_ppt(rho, dim_pair))

# generic random 4x4 density matrix (typically entangled)
rho = numqi.random.rand_density_matrix(4)
print('LEE for a random density matrix:', numqi.entangle.get_linear_entropy_entanglement_ppt(rho, dim_pair))

# rank-1 projector |psi><psi| onto the maximally entangled state
psi = numqi.state.maximally_entangled_state(2)
rho = np.outer(psi, psi.conj())
print('LEE for a maximally entangled state:', numqi.entangle.get_linear_entropy_entanglement_ppt(rho, dim_pair))
LEE for a random product state: 0 LEE for a random separable state: 0
LEE for a random density matrix: 0.0013973684432351252 LEE for a maximally entangled state: 0.4999986936612686
Werner states¶
Below, gradient-based optimization is used to find the LEE of Werner states.
# Sweep the Werner(dim, alpha) family over alpha in [0, 1] and compute the
# linear entropy via gradient-based optimization, once with the convex-roof
# model (the LEE) and once with the concave-roof model for comparison.
alpha_list = np.linspace(0, 1, 100)
dim = 3

def _sweep(kind, sign):
    # Optimize the linear-entropy model at every alpha; `sign` undoes the
    # negation used when maximizing (concave case) via a minimizer.
    model = numqi.entangle.DensityMatrixLinearEntropyModel([dim,dim], num_ensemble=27, kind=kind)
    values = []
    for alpha_i in tqdm(alpha_list):
        model.set_density_matrix(numqi.state.Werner(dim, alpha=alpha_i))
        values.append(sign * numqi.optimize.minimize(model, num_repeat=3, tol=1e-10, print_every_round=0).fun)
    return np.array(values)

ret0 = _sweep('convex', 1)
ret1 = _sweep('concave', -1)

fig,ax = plt.subplots()
# Werner states are separable for alpha <= 1/dim — mark the boundary.
ax.axvline(1/dim, color='r')
ax.plot(alpha_list, ret0, label='convex (LEE)')
ax.plot(alpha_list, ret1, label='concave')
ax.legend()
# ax.set_yscale('log')
ax.set_xlabel('alpha')
ax.set_ylabel('linear entropy')
ax.set_title(f'Werner({dim})')
fig.tight_layout()
Horodecki states¶
# Compare the SDP (PPT) lower bound with gradient descent for interpolations
# of the Horodecki 1997 two-qutrit bound entangled state toward p = 1.
rho = numqi.state.get_bes3x3_Horodecki1997(0.23)
plist = np.linspace(0.92, 1, 30)
plist_ppt = plist[::3] #to save time
# SDP bound on the subsampled grid (about 3 minutes)
dm_batch = np.stack([numqi.utils.hf_interpolate_dm(rho,alpha=p) for p in plist_ppt])
ret_ppt = numqi.entangle.get_linear_entropy_entanglement_ppt(dm_batch, (3,3), use_tqdm=True)
# gradient descent on the full grid (about 1 minute)
model = numqi.entangle.DensityMatrixLinearEntropyModel([3,3], num_ensemble=27, kind='convex')

def _lee_gd(p):
    # Best convex-roof value found for the interpolated density matrix at p.
    model.set_density_matrix(numqi.utils.hf_interpolate_dm(rho, alpha=p))
    return numqi.optimize.minimize(model, num_repeat=3, tol=1e-10, print_every_round=0).fun

ret_gd = np.array([_lee_gd(p) for p in tqdm(plist)])

fig,ax = plt.subplots()
ax.plot(plist_ppt, ret_ppt, 'x', label='PPT')
ax.plot(plist, ret_gd, label='gradient descent')
ax.legend()
ax.set_xlabel('p')
ax.set_ylabel('linear entropy')
ax.set_yscale('log')
ax.set_title('Horodecki1997-2qutrit(0.23)')
fig.tight_layout()