This article collects typical usage examples of the Python function numpy.tensordot. If you are wondering how exactly numpy.tensordot is used, or what real code that calls it looks like, the curated examples below may help. You can also browse further examples for the numpy module in which this function lives.
The following shows 15 code examples of numpy.tensordot, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
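Before the examples, here is a minimal standalone sketch of what the `axes` argument of numpy.tensordot controls (the arrays below are made up purely for illustration):

import numpy as np

a = np.arange(6.0).reshape(2, 3)   # shape (2, 3)
b = np.arange(12.0).reshape(3, 4)  # shape (3, 4)
v = np.arange(3.0)                 # shape (3,)

# axes=1: contract the last axis of `a` with the first axis of `b`
# (an ordinary matrix product), result shape (2, 4)
print(np.allclose(np.tensordot(a, b, axes=1), a @ b))

# axes=0: no contraction at all, i.e. the outer (tensor) product,
# result shape (2, 3, 3, 4)
print(np.tensordot(a, b, axes=0).shape)

# axes=([1], [0]): explicitly name which axes to contract; here it is
# again equivalent to a matrix-vector product
print(np.allclose(np.tensordot(a, v, axes=([1], [0])), a @ v))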
Example 1: test_vna_lih
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def test_vna_lih(self):
dname = dirname(abspath(__file__))
n = nao(label='lih', cd=dname)
m = 200
dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m, (n.atom2coord[1] + n.atom2coord[0])/2.0
vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
#vgrid = np.array([[-1.517908564663352e+00, 1.180550033093826e+00,0.000000000000000e+00]])
vna = n.vna(vgrid)
#for v,r in zip(vna,vgrid):
# print("%23.15e %23.15e %23.15e %23.15e"%(r[0], r[1], r[2], v))
#print(vna.shape, sgrid.shape)
np.savetxt('vna_lih_0004.txt', np.row_stack((sgrid, vna)).T)
ref = np.loadtxt(dname+'/vna_lih_0004.txt-ref')
for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d)
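The `vgrid` line above uses tensordot with `axes=0` as an outer product to lay grid points along the bond direction. A minimal sketch of the same pattern, with made-up endpoints standing in for `n.atom2coord[0]` and `n.atom2coord[1]`:

import numpy as np

# hypothetical endpoints standing in for the two atomic positions
r0 = np.array([0.0, 0.0, 0.0])
r1 = np.array([0.0, 0.0, 3.0])
m = 200

dvec = 2 * (r1 - r0) / m          # step vector, shape (3,)
midv = (r1 + r0) / 2.0            # midpoint, shape (3,)

steps = np.arange(-m, m + 1)      # shape (2*m+1,)
# outer product of the step indices with the step vector: shape (2*m+1, 3)
vgrid = np.tensordot(steps, dvec, axes=0) + midv
# signed distance of each grid point from the midpoint
sgrid = steps * np.sqrt((dvec * dvec).sum())

print(vgrid.shape, sgrid.shape)   # (401, 3) (401,)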
Example 2: deriv_wrt_params
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def deriv_wrt_params(self, wrtFilter=None):
"""
Construct a matrix whose columns are the derivatives of the SPAM vector
with respect to a single param. Thus, each column is of length
get_dimension and there is one column per SPAM vector parameter.

Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dmVec = self.state_vec.todense()
derrgen = self.error_map.deriv_wrt_params(wrtFilter) # shape (dim*dim, nParams)
derrgen.shape = (self.dim, self.dim, derrgen.shape[1]) # => (dim,dim,nParams)
if self._prep_or_effect == "prep":
# the derivative of the error map acts on dmVec
#return _np.einsum("ijk,j->ik", derrgen, dmVec) # return shape = (dim,nParams)
return _np.tensordot(derrgen, dmVec, (1, 0)) # return shape = (dim,nParams)
else:
# self.error_map acts on the *state* vector before dmVec acts
# as an effect: E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
#return _np.einsum("jik,j->ik", derrgen.conjugate(), dmVec) # return shape = (dim,nParams)
return _np.tensordot(derrgen.conjugate(), dmVec, (0, 0)) # return shape = (dim,nParams)
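The commented-out einsum calls describe the same contractions that the tensordot calls perform. A small numpy-only check of that equivalence, with random arrays whose shapes mirror the comments (no pyGSTi objects involved):

import numpy as np

dim, n_params = 4, 3
derrgen = np.random.rand(dim, dim, n_params) + 1j * np.random.rand(dim, dim, n_params)
dm_vec = np.random.rand(dim)                   # (dim,)

# "prep" branch: contract axis 1 of derrgen with dm_vec
prep = np.tensordot(derrgen, dm_vec, (1, 0))               # (dim, nParams)
print(np.allclose(prep, np.einsum("ijk,j->ik", derrgen, dm_vec)))

# "effect" branch: contract axis 0 of the conjugated tensor instead
effect = np.tensordot(derrgen.conjugate(), dm_vec, (0, 0)) # (dim, nParams)
print(np.allclose(effect, np.einsum("jik,j->ik", derrgen.conjugate(), dm_vec)))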
Example 3: test_maximum_eigenvector_power_method
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def test_maximum_eigenvector_power_method(self):
"""Tests power method routine on some known left-stochastic matrices."""
matrix1 = np.matrix([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9], [0.4, 0.3, 0.0]])
matrix2 = np.matrix([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5], [0.4, 0.5, 0.3]])
with self.wrapped_session() as session:
eigenvector1 = session.run(
proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
tf.constant(matrix1)))
eigenvector2 = session.run(
proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
tf.constant(matrix2)))
# Check that eigenvector1 and eigenvector2 are eigenvectors of matrix1 and
# matrix2 (respectively) with associated eigenvalue 1.
matrix_eigenvector1 = np.tensordot(matrix1, eigenvector1, axes=1)
matrix_eigenvector2 = np.tensordot(matrix2, eigenvector2, axes=1)
self.assertAllClose(eigenvector1, matrix_eigenvector1, rtol=0, atol=1e-6)
self.assertAllClose(eigenvector2, matrix_eigenvector2, rtol=0, atol=1e-6)
Author: google-research; Project: tensorflow_constrained_optimization; Lines of code: 21; Source file: proxy_lagrangian_optimizer_test.py
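Here `axes=1` is simply a matrix-vector product. A standalone numpy sketch of the property being tested, using a hand-rolled power iteration in place of the TensorFlow routine (the matrix is the first test matrix above):

import numpy as np

# a left-stochastic matrix (each column sums to 1)
matrix = np.array([[0.6, 0.1, 0.1],
                   [0.0, 0.6, 0.9],
                   [0.4, 0.3, 0.0]])

# crude power iteration to approximate the maximal eigenvector
v = np.ones(3) / 3.0
for _ in range(200):
    v = np.tensordot(matrix, v, axes=1)   # same as matrix @ v
    v /= np.sum(v)

# for a left-stochastic matrix the maximal eigenvalue is 1,
# so matrix @ v should reproduce v
print(np.allclose(np.tensordot(matrix, v, axes=1), v, atol=1e-6))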
Example 4: middle_bond_hamiltonian
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def middle_bond_hamiltonian(Jx, Jz, hx, hz, L):
"""" Returns the spin operators sigma_x and sigma_z for L sites."""
sx = np.array([[0., 1.], [1., 0.]])
sz = np.array([[1., 0.], [0., -1.]])
H_bond = Jx * np.kron(sx, sx) + Jz * np.kron(sz, sz)
H_bond = H_bond + hx / 2 * np.kron(sx, np.eye(2)) + hx / 2 * np.kron(np.eye(2), sx)
H_bond = H_bond + hz / 2 * np.kron(sz, np.eye(2)) + hz / 2 * np.kron(np.eye(2), sz)
H_bond = H_bond.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3).reshape(4, 4)  # i1 i2 i1' i2' --> (i1 i1') (i2 i2')
U, s, V = np.linalg.svd(H_bond)
M1 = np.dot(U, np.diag(s)).reshape(2, 2, 1, 4).transpose(2, 3, 0, 1)
M2 = V.reshape(4, 1, 2, 2)
M0 = np.tensordot(np.tensordot([1], [1], axes=0), np.eye(2), axes=0)
W = []
for i in range(L):
if i == L / 2 - 1:
W.append(M1)
elif i == L / 2:
W.append(M2)
else:
W.append(M0)
return W
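In this MPO construction, tensordot with `axes=0` only serves to build the trivial (1, 1, 2, 2) identity tensor `M0`. A minimal sketch of just that shape bookkeeping:

import numpy as np

# outer product of two length-1 vectors: shape (1, 1)
triv = np.tensordot([1], [1], axes=0)
print(triv.shape)            # (1, 1)

# outer product with the 2x2 identity: shape (1, 1, 2, 2),
# i.e. an MPO tensor with trivial (dimension-1) virtual legs
M0 = np.tensordot(triv, np.eye(2), axes=0)
print(M0.shape)              # (1, 1, 2, 2)
print(np.allclose(M0[0, 0], np.eye(2)))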
Example 5: test_npc_tensordot_extra
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def test_npc_tensordot_extra():
# check that the sorting of charges is fine with special test matrices
# which gave me some headaches at some point :/
chinfo = npc.ChargeInfo([1], ['Sz'])
leg = npc.LegCharge.from_qflat(chinfo, [-1, 1])
legs = [leg, leg, leg.conj(), leg.conj()]
idx = [(0, 0, 0, 0), (0, 1, 0, 1), (0, 1, 1, 0), (1, 0, 0, 1), (1, 0, 1, 0), (1, 1, 1, 1)]
Uflat = np.eye(4).reshape([2, 2, 2, 2]) # up to numerical rubbish the identity
Uflat[0, 1, 1, 0] = Uflat[1, 0, 0, 1] = 1.e-20
U = npc.Array.from_ndarray(Uflat, legs, cutoff=0.)
theta_flat = np.zeros([2, 2, 2, 2])
vals = np.random.random(len(idx))
vals /= np.linalg.norm(vals)
for i, val in zip(idx, vals):
theta_flat[i] = val
theta = npc.Array.from_ndarray(theta_flat, [leg, leg, leg.conj(), leg.conj()], cutoff=0.)
assert abs(np.linalg.norm(theta_flat) - npc.norm(theta)) < 1.e-14
Utheta_flat = np.tensordot(Uflat, theta_flat, axes=2)
Utheta = npc.tensordot(U, theta, axes=2)
npt.assert_array_almost_equal_nulp(Utheta.to_ndarray(), Utheta_flat, 10)
assert abs(np.linalg.norm(theta_flat) - npc.norm(Utheta)) < 1.e-10
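With an integer argument, `axes=2` contracts the last two axes of the first tensor against the first two axes of the second, which for these (2, 2, 2, 2) tensors is a 4x4 matrix product in disguise. A standalone check of that equivalence with random arrays:

import numpy as np

U = np.random.rand(2, 2, 2, 2)
theta = np.random.rand(2, 2, 2, 2)

# contract the last two legs of U with the first two legs of theta
out = np.tensordot(U, theta, axes=2)            # shape (2, 2, 2, 2)

# the same contraction as a plain matrix product on grouped indices
out_mat = (U.reshape(4, 4) @ theta.reshape(4, 4)).reshape(2, 2, 2, 2)
print(np.allclose(out, out_mat))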
Example 6: test_npc_outer
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def test_npc_outer():
for sort in [True, False]:
print("sort =", sort)
a = random_Array((6, 7), chinfo3, sort=sort)
b = random_Array((5, 5), chinfo3, sort=sort)
aflat = a.to_ndarray()
bflat = b.to_ndarray()
c = npc.outer(a, b)
c.test_sanity()
cflat = np.tensordot(aflat, bflat, axes=0)
npt.assert_equal(c.to_ndarray(), cflat)
c = npc.tensordot(a, b, axes=0) # (should as well call npc.outer)
npt.assert_equal(c.to_ndarray(), cflat)
print("for trivial charge")
a = npc.Array.from_func(np.random.random, [lcTr, lcTr.conj()], shape_kw='size')
aflat = a.to_ndarray()
b = npc.tensordot(a, a, axes=0)
bflat = np.tensordot(aflat, aflat, axes=0)
npt.assert_array_almost_equal_nulp(b.to_ndarray(), bflat, sum(a.shape))
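`axes=0` performs no contraction at all, so tensordot reduces to the outer (tensor) product, which is exactly what this test compares against. A minimal numpy-only check:

import numpy as np

a = np.random.rand(6, 7)
b = np.random.rand(5, 5)

c = np.tensordot(a, b, axes=0)          # shape (6, 7, 5, 5)
print(c.shape)
# identical to the elementwise outer product of the two arrays
print(np.allclose(c, np.multiply.outer(a, b)))
# and to an explicit broadcasted product
print(np.allclose(c, a[:, :, None, None] * b[None, None, :, :]))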
Example 7: update_bond
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def update_bond(self, i):
j = (i + 1) % self.psi.L
# get effective Hamiltonian
Heff = SimpleHeff(self.LPs[i], self.RPs[j], self.H_mpo[i], self.H_mpo[j])
# Diagonalize Heff, find ground state `theta`
theta0 = np.reshape(self.psi.get_theta2(i), [Heff.shape[0]]) # initial guess
e, v = arp.eigsh(Heff, k=1, which='SA', return_eigenvectors=True, v0=theta0)
theta = np.reshape(v[:, 0], Heff.theta_shape)
# split and truncate
Ai, Sj, Bj = split_truncate_theta(theta, self.chi_max, self.eps)
# put back into MPS
Gi = np.tensordot(np.diag(self.psi.Ss[i]**(-1)), Ai, axes=[1, 0]) # vL [vL*], [vL] i vC
self.psi.Bs[i] = np.tensordot(Gi, np.diag(Sj), axes=[2, 0]) # vL i [vC], [vC*] vC
self.psi.Ss[j] = Sj # vC
self.psi.Bs[j] = Bj # vC j vR
self.update_LP(i)
self.update_RP(j)
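The two tensordot calls at the end absorb the singular values into the virtual legs of `Ai`. The same contractions with plain random arrays (shapes chosen arbitrarily):

import numpy as np

chi_L, d, chi_R = 3, 2, 4
Ai = np.random.rand(chi_L, d, chi_R)    # vL i vC
S_L = np.random.rand(chi_L) + 0.1       # singular values on the left bond
S_R = np.random.rand(chi_R) + 0.1       # singular values on the right bond

# absorb 1/S_L into the left leg:  vL [vL*], [vL] i vC  ->  vL i vC
Gi = np.tensordot(np.diag(S_L**(-1)), Ai, axes=[1, 0])
print(np.allclose(Gi, Ai / S_L[:, None, None]))

# absorb S_R into the right leg:  vL i [vC], [vC*] vC  ->  vL i vC
Bi = np.tensordot(Gi, np.diag(S_R), axes=[2, 0])
print(np.allclose(Bi, Gi * S_R[None, None, :]))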
Example 8: correlation_length
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def correlation_length(self):
"""Diagonalize transfer matrix to obtain the correlation length."""
import scipy.sparse.linalg as arp  # provides eigs; older code used scipy.sparse.linalg.eigen.arpack
assert self.bc == 'infinite' # works only in the infinite case
B = self.Bs[0] # vL i vR
chi = B.shape[0]
T = np.tensordot(B, np.conj(B), axes=[1, 1]) # vL [i] vR, vL* [i*] vR*
T = np.transpose(T, [0, 2, 1, 3]) # vL vL* vR vR*
for i in range(1, self.L):
B = self.Bs[i]
T = np.tensordot(T, B, axes=[2, 0]) # vL vL* [vR] vR*, [vL] i vR
T = np.tensordot(T, np.conj(B), axes=[[2, 3], [0, 1]])
# vL vL* [vR*] [i] vR, [vL*] [i*] vR*
T = np.reshape(T, (chi**2, chi**2))
# Obtain the 2nd largest eigenvalue
eta = arp.eigs(T, k=2, which='LM', return_eigenvectors=False, ncv=20)
return -self.L / np.log(np.min(np.abs(eta)))
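The transfer matrix is built by contracting the physical leg of the MPS tensor `B` with that of its conjugate. A shape-level sketch of the first step, with a random tensor standing in for `B`:

import numpy as np

chi, d = 5, 2
B = np.random.rand(chi, d, chi) + 1j * np.random.rand(chi, d, chi)  # vL i vR

# contract the physical index: vL [i] vR, vL* [i*] vR*  ->  vL vR vL* vR*
T = np.tensordot(B, np.conj(B), axes=[1, 1])
print(T.shape)                              # (chi, chi, chi, chi)

# reorder to vL vL* vR vR* and group into a chi^2 x chi^2 matrix
T = np.transpose(T, [0, 2, 1, 3]).reshape(chi**2, chi**2)
print(T.shape)                              # (25, 25)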
Example 9: binary_to_decimal
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def binary_to_decimal(X):
"""
Parameters
----------
X: xp.ndarray
Feature maps
"""
# This function expects X of shape (n_images, L2, y, x)
# as an argument.
# Let's say that X[k] (0 <= k < n_images) can be represented like
# X[k] = [map_k[0], map_k[1], ..., map_k[L2-1]]
# where the shape of each map_k is (y, x).
# Then we calculate
# a[0] * map_k[0] + a[1] * map_k[1] + ... + a[L2-1] * map_k[L2-1]
# for each X[k], where a = [2^(L2-1), 2^(L2-2), ..., 2^0]
# Therefore, the output shape must be (n_images, y, x)
a = xp.arange(X.shape[1])[::-1]
a = xp.power(2, a)
return xp.tensordot(X, a, axes=([1], [0]))
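A self-contained check of the weighted sum that `binary_to_decimal` computes, using numpy in place of the `xp` array module and a tiny made-up feature map:

import numpy as np

n_images, L2, h, w = 2, 3, 4, 4
X = np.random.randint(0, 2, size=(n_images, L2, h, w))

# weights [2^(L2-1), ..., 2^1, 2^0]
a = np.power(2, np.arange(L2)[::-1])

# contract the L2 axis of X with the weight vector: shape (n_images, h, w)
dec = np.tensordot(X, a, axes=([1], [0]))
print(dec.shape)

# the same thing written as an explicit sum over the binary planes
ref = sum(a[k] * X[:, k] for k in range(L2))
print(np.array_equal(dec, ref))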
Example 10: __init__
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def __init__(self, *dim):
"""
>>> Id(1)
Tensor(dom=Dim(1), cod=Dim(1), array=[1])
>>> list(Id(2).array.flatten())
[1.0, 0.0, 0.0, 1.0]
>>> Id(2).array.shape
(2, 2)
>>> list(Id(2, 2).array.flatten())[:8]
[1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
>>> list(Id(2, 2).array.flatten())[8:]
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
"""
dim = dim[0] if isinstance(dim[0], Dim) else Dim(*dim)
array = functools.reduce(
lambda a, x: np.tensordot(a, np.identity(x), 0)
if a.shape else np.identity(x), dim, np.array(1))
array = np.moveaxis(
array, [2 * i for i in range(len(dim))], list(range(len(dim))))
super().__init__(dim, dim, array)
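The `functools.reduce` chains tensordot calls with `axes=0` and then interleaves the axes with `moveaxis` so that the result is the identity on the full tensor-product space. A numpy-only sketch of that claim for two 2-dimensional factors (`Dim` and `Id` belong to the surrounding library and are not reproduced here):

import functools
import numpy as np

dims = (2, 2)
array = functools.reduce(
    lambda a, x: np.tensordot(a, np.identity(x), 0)
    if a.shape else np.identity(x), dims, np.array(1))
array = np.moveaxis(
    array, [2 * i for i in range(len(dims))], list(range(len(dims))))

# the interleaved tensor is the identity on the 4-dimensional product space
print(np.allclose(array.reshape(4, 4), np.identity(4)))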
Example 11: _interleaved_identities
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def _interleaved_identities(n: int, cutoff_dim: int):
r"""Maximally entangled state of `n` modes.
Returns the tensor :math:`\sum_{abc\ldots} \ket{abc\ldots}\bra{abc\ldots}`
representing an unnormalized, maximally entangled state of `n` subsystems.
Args:
n (int): number of subsystems
cutoff_dim (int): Fock basis truncation dimension
Returns:
array: unnormalized maximally entangled state, shape == (cutoff_dim,) * (2*n)
"""
I = np.identity(cutoff_dim)
temp = I
for _ in range(1, n):
temp = np.tensordot(temp, I, axes=0)
# use einsum to permute the indices such that |a><a|*|b><b|*|c><c|*... becomes |abc...><abc...|
sublist = [int(n) for n in np.arange(2 * n).reshape((2, n)).T.reshape([-1])]
return np.einsum(temp, sublist)
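For `n = 2` the einsum at the end merely permutes the axes so that the ket and bra indices end up grouped together; grouping them yields the identity on the product space. A small standalone check with an arbitrary cutoff:

import numpy as np

cutoff_dim, n = 3, 2
I = np.identity(cutoff_dim)

temp = np.tensordot(I, I, axes=0)        # shape (d, d, d, d)
sublist = [int(k) for k in np.arange(2 * n).reshape((2, n)).T.reshape([-1])]
out = np.einsum(temp, sublist)           # axes reordered to |ab><ab|

# grouping (a, b) into one index gives the identity on the product space
d = cutoff_dim
print(np.allclose(out.reshape(d * d, d * d), np.identity(d * d)))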
Example 12: tensor
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def tensor(u, v, n, pure, pos=None):
"""
Returns the tensor product of `u` and `v`, with the axes of `v` optionally
spliced into those of `u` at location `pos`.
"""
w = np.tensordot(u, v, axes=0)
if pos is not None:
if pure:
scale = 1
else:
scale = 2
for i in range(v.ndim):
w = np.rollaxis(w, scale * n + i, scale * pos + i)
return w
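A shape-only sketch of the splicing in the pure case: after the outer product, `rollaxis` moves the axes of `v` so that they sit at position `pos` among the axes of `u` (all dimensions below are made up):

import numpy as np

u = np.random.rand(2, 3, 4)   # a "pure" 3-mode object, n = 3
v = np.random.rand(5)         # a single extra mode
n, pos, scale = 3, 1, 1       # scale = 1 because pure

w = np.tensordot(u, v, axes=0)            # shape (2, 3, 4, 5)
for i in range(v.ndim):
    w = np.rollaxis(w, scale * n + i, scale * pos + i)
print(w.shape)                            # (2, 5, 3, 4): v spliced in at pos = 1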
Example 13: testTensordot
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != lnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != lnp.bfloat16 else b.astype(onp.float32)
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True)
Example 14: test_vna_n2
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def test_vna_n2(self):
dname = dirname(abspath(__file__))
n = nao(label='n2', cd=dname)
m = 200
dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m, (n.atom2coord[1] + n.atom2coord[0])/2.0
vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
vna = n.vna(vgrid)
#print(vna.shape, sgrid.shape)
#np.savetxt('vna_n2_0004.txt', np.row_stack((sgrid, vna)).T)
ref = np.loadtxt(dname+'/vna_n2_0004.txt-ref')
for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d)
Example 15: hessian_wrt_params
# Required import: import numpy [as alias]
# Or: from numpy import tensordot [as alias]
def hessian_wrt_params(self, wrtFilter1=None, wrtFilter2=None):
"""
Construct the Hessian of this SPAM vector with respect to its parameters.
This function returns a tensor whose first axis corresponds to the
flattened operation matrix and whose 2nd and 3rd axes correspond to the
parameters that are differentiated with respect to.

Parameters
----------
wrtFilter1, wrtFilter2 : list
Lists of indices of the parameters to take first and second
derivatives with respect to. If None, then derivatives are
taken with respect to all of the vector's parameters.

Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
dmVec = self.state_vec.todense()
herrgen = self.error_map.hessian_wrt_params(wrtFilter1, wrtFilter2) # shape (dim*dim, nParams1, nParams2)
herrgen.shape = (self.dim, self.dim, herrgen.shape[1], herrgen.shape[2]) # => (dim,dim,nParams1, nParams2)
if self._prep_or_effect == "prep":
# the derivative of the error map acts on dmVec
#return _np.einsum("ijkl,j->ikl", herrgen, dmVec) # return shape = (dim,nParams1,nParams2)
return _np.tensordot(herrgen, dmVec, (1, 0)) # return shape = (dim,nParams1,nParams2)
else:
# self.error_map acts on the *state* vector before dmVec acts
# as an effect: E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
#return _np.einsum("jikl,j->ikl", herrgen.conjugate(), dmVec) # return shape = (dim,nParams1,nParams2)
return _np.tensordot(herrgen.conjugate(), dmVec, (0, 0)) # return shape = (dim,nParams1,nParams2)
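As in `deriv_wrt_params` above, the tensordot calls are equivalent to the commented-out einsum expressions; the only difference is the two extra parameter axes. A sketch of the shape bookkeeping with random arrays (no pyGSTi objects involved):

import numpy as np

dim, n_params1, n_params2 = 4, 3, 2
herrgen = np.random.rand(dim, dim, n_params1, n_params2)
dm_vec = np.random.rand(dim)

hess_prep = np.tensordot(herrgen, dm_vec, (1, 0))      # (dim, nParams1, nParams2)
print(np.allclose(hess_prep, np.einsum("ijkl,j->ikl", herrgen, dm_vec)))

hess_effect = np.tensordot(herrgen.conjugate(), dm_vec, (0, 0))
print(np.allclose(hess_effect, np.einsum("jikl,j->ikl", herrgen.conjugate(), dm_vec)))
print(hess_prep.shape, hess_effect.shape)              # (4, 3, 2) twice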