This article collects typical usage examples of Python's torch.tensordot method. If you are wondering what torch.tensordot does, how to call it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the torch namespace that the method lives in.
The following shows 15 code examples of torch.tensordot, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
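Before the examples, here is a minimal sketch of the two ways the dims argument can be passed to torch.tensordot (tensor names and shapes are illustrative only):

import torch

a = torch.randn(3, 4, 5)
b = torch.randn(4, 5, 6)

# dims as an integer: contract the last 2 axes of a with the first 2 axes of b
c = torch.tensordot(a, b, dims=2)            # shape (3, 6)

# dims as a pair of axis lists: contract axis 1 of a with axis 0 of b
d = torch.tensordot(a, b, dims=([1], [0]))   # shape (3, 5, 5, 6)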
Example 1: idct_8x8
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def idct_8x8(image):
    # Also requires itertools and numpy as np; tensordot_pytorch is a helper defined
    # elsewhere in the source project (the commented torch.tensordot call is equivalent).
    alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
    alpha = torch.FloatTensor(np.outer(alpha, alpha)).cuda()
    image = image * alpha
    # Build the 8x8 inverse-DCT cosine basis as an (8, 8, 8, 8) tensor.
    tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
    for x, y, u, v in itertools.product(range(8), repeat=4):
        tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
            (2 * v + 1) * y * np.pi / 16)
    # result = 0.25 * torch.tensordot(image, torch.as_tensor(tensor, device="cuda"), dims=2) + 128
    result = 0.25 * tensordot_pytorch(image, torch.as_tensor(tensor, device="cuda"), dims=2) + 128
    result = result.view(image.size())   # .view() is not in-place; assign the reshaped result
    return result

# -3. Block joining
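In idct_8x8 the contraction uses dims=2: the trailing 8x8 block axes of image are summed against the first two axes of the (8, 8, 8, 8) cosine basis, so the output keeps the leading batch/block axes and gains the basis's trailing 8x8 axes. A shape-only sketch using plain torch.tensordot (the block layout here is illustrative, not necessarily the layout used by the source project):

import torch

blocks = torch.randn(4, 32, 8, 8)   # e.g. (batch, num_blocks, 8, 8) -- illustrative shape
basis = torch.randn(8, 8, 8, 8)     # DCT/IDCT cosine basis

out = torch.tensordot(blocks, basis, dims=2)
print(out.shape)                    # torch.Size([4, 32, 8, 8])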
Example 2: get_obs
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def get_obs(Asymm, H, Sx, Sy, Sz, C, E):
    # A(phy,u,l,d,r), C(d,r), E(u,r,d)
    Da = Asymm.size()
    Td = torch.einsum('mefgh,nabcd->eafbgchdmn', (Asymm, Asymm)).contiguous().view(
        Da[1]**2, Da[2]**2, Da[3]**2, Da[4]**2, Da[0], Da[0])
    # print(torch.dist(Td, Td.permute(0, 3, 2, 1, 4, 5)))  # test left-right reflection symmetry of Td
    CE = torch.tensordot(C, E, ([1], [0]))          # C(1d)E(dga)->CE(1ga)
    EL = torch.tensordot(E, CE, ([2], [0]))         # E(2e1)CE(1ga)->EL(2ega), use E(2e1) == E(1e2)
    EL = torch.tensordot(EL, Td, ([1, 2], [1, 0]))  # EL(2ega)T(gehbmn)->EL(2ahbmn)
    EL = torch.tensordot(EL, CE, ([0, 2], [0, 1]))  # EL(2ahbmn)CE(2hc)->EL(abmnc), use CE(2hc) == CE(1ga)
    Rho = torch.tensordot(EL, EL, ([0, 1, 4], [0, 1, 4])).permute(0, 2, 1, 3).contiguous().view(
        Da[0]**2, Da[0]**2)
    # print((Rho - Rho.t()).norm())
    Rho = 0.5 * (Rho + Rho.t())   # symmetrize the reduced density matrix
    Tnorm = Rho.trace()
    Energy = torch.mm(Rho, H).trace() / Tnorm
    Mx = torch.mm(Rho, Sx).trace() / Tnorm
    My = torch.mm(Rho, Sy).trace() / Tnorm
    Mz = torch.mm(Rho, Sz).trace() / Tnorm
    # print("Tnorm = %g, Energy = %g " % (Tnorm.item(), Energy.item()))
    return Energy, Mx, My, Mz
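Each step above passes explicit axis pairs. For instance, CE = torch.tensordot(C, E, ([1], [0])) sums axis 1 of the corner tensor C against axis 0 of the edge tensor E, keeping the remaining axes in order. A shape-only sketch (the bond dimensions chi and D are illustrative):

import torch

chi, D = 10, 3
C = torch.randn(chi, chi)          # corner matrix C(d, r)
E = torch.randn(chi, D**2, chi)    # edge tensor E(u, r, d)

CE = torch.tensordot(C, E, ([1], [0]))
print(CE.shape)                    # torch.Size([10, 9, 10]): C's axis 0, then E's axes 1 and 2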
Example 3: forward
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def forward(self, inputs):
    embeds_vec_list = inputs
    row = []
    col = []
    # Collect every pair of embedding vectors for the pairwise interactions.
    for r, c in itertools.combinations(embeds_vec_list, 2):
        row.append(r)
        col.append(c)
    p = torch.cat(row, dim=1)
    q = torch.cat(col, dim=1)
    inner_product = p * q

    bi_interaction = inner_product
    attention_temp = F.relu(torch.tensordot(
        bi_interaction, self.attention_W, dims=([-1], [0])) + self.attention_b)
    self.normalized_att_score = F.softmax(torch.tensordot(
        attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
    attention_output = torch.sum(
        self.normalized_att_score * bi_interaction, dim=1)

    attention_output = self.dropout(attention_output)  # training

    afm_out = torch.tensordot(
        attention_output, self.projection_p, dims=([-1], [0]))
    return afm_out
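Here torch.tensordot(bi_interaction, self.attention_W, dims=([-1], [0])) acts as a dense layer applied over the last axis: every embedding-sized vector in the batch is multiplied by the same weight matrix, like a batched matmul. A shape-only sketch with illustrative names and sizes (batch B, interaction pairs N, embedding size E, attention size A):

import torch

B, N, E, A = 2, 6, 8, 4
bi_interaction = torch.randn(B, N, E)
attention_W = torch.randn(E, A)

# Contract the last axis of bi_interaction with the first axis of attention_W;
# the layer above expresses the same pairing with the negative index -1.
out = torch.tensordot(bi_interaction, attention_W, dims=([2], [0]))
print(out.shape)   # torch.Size([2, 6, 4])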
Example 4: tensordot
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def tensordot(x, y, dims):
    """
    Wrapper around :func:`torch.tensordot` or :func:`np.tensordot`
    to operate on real-valued Funsors.

    Note this operates only on the ``output`` tensor. To perform sum-product
    contractions on named dimensions, instead use ``+`` and
    :class:`~funsor.terms.Reduce`.

    Arguments should satisfy::

        len(x.shape) >= dims
        len(y.shape) >= dims
        dims == 0 or x.shape[-dims:] == y.shape[:dims]

    :param Funsor x: A left hand argument.
    :param Funsor y: A right hand argument.
    :param int dims: The number of trailing dims of ``x`` (and leading dims
        of ``y``) to contract.
    :rtype: Funsor
    """
    assert dims >= 0
    assert len(x.shape) >= dims
    assert len(y.shape) >= dims
    assert dims == 0 or x.shape[-dims:] == y.shape[:dims]
    x_start, x_end = 0, len(x.output.shape)
    y_start = x_end - dims
    y_end = y_start + len(y.output.shape)
    symbols = 'abcdefghijklmnopqrstuvwxyz'
    equation = '{},{}->{}'.format(symbols[x_start:x_end],
                                  symbols[y_start:y_end],
                                  symbols[x_start:y_start] + symbols[x_end:y_end])
    return Einsum(equation, (x, y))
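For concreteness, with x.output.shape == (3, 4), y.output.shape == (4, 5) and dims=1, the code above builds the einsum equation 'ab,bc->ac', i.e. an ordinary matrix product. A quick check of that equation against torch.tensordot on raw tensors (shapes are illustrative only):

import torch

x = torch.randn(3, 4)
y = torch.randn(4, 5)

via_einsum = torch.einsum('ab,bc->ac', x, y)
via_tensordot = torch.tensordot(x, y, dims=1)
print(torch.allclose(via_einsum, via_tensordot))   # True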
Example 5: _numeric_tensordot
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def _numeric_tensordot(x, y, dim):
    # Dispatch to the active backend; torch and numpy tensordot share the same semantics.
    if get_backend() == "torch":
        import torch
        return torch.tensordot(x, y, dim)
    else:
        return np.tensordot(x, y, axes=dim)
Example 6: test_tensor_tensordot
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def test_tensor_tensordot(x_shape, xy_shape, y_shape):
    x = randn(x_shape + xy_shape)
    y = randn(xy_shape + y_shape)
    dim = len(xy_shape)
    actual = tensordot(Tensor(x), Tensor(y), dim)
    expected = Tensor(_numeric_tensordot(x, y, dim))
    assert_close(actual, expected, atol=1e-5, rtol=None)
Example 7: rgb_to_ycbcr
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def rgb_to_ycbcr(image):
    matrix = np.array(
        [[65.481, 128.553, 24.966],
         [-37.797, -74.203, 112.],
         [112., -93.786, -18.214]],
        dtype=np.float32).T / 255
    shift = torch.as_tensor([16., 128., 128.], device="cuda")
    # result = torch.tensordot(image, torch.as_tensor(matrix, device="cuda"), dims=1) + shift
    result = tensordot_pytorch(image, matrix, dims=1) + shift
    result = result.view(image.size())   # .view() is not in-place; assign the reshaped result
    return result
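With dims=1 the contraction pairs only the last axis of image (the 3 RGB channels) with the first axis of the 3x3 matrix, so the conversion is a per-pixel matrix multiply that preserves the image shape. A shape-only sketch on the CPU with plain torch.tensordot (the channel-last layout is illustrative; tensordot_pytorch is the project's own helper):

import torch

image = torch.rand(2, 64, 64, 3)    # (batch, H, W, RGB) -- illustrative layout
matrix = torch.rand(3, 3)
shift = torch.tensor([16., 128., 128.])

ycbcr = torch.tensordot(image, matrix, dims=1) + shift
print(ycbcr.shape)                  # torch.Size([2, 64, 64, 3])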
Example 8: rgb_to_ycbcr_jpeg
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def rgb_to_ycbcr_jpeg(image):
    matrix = np.array(
        [[0.299, 0.587, 0.114],
         [-0.168736, -0.331264, 0.5],
         [0.5, -0.418688, -0.081312]],
        dtype=np.float32).T
    shift = torch.as_tensor([0., 128., 128.], device="cuda")
    # result = torch.tensordot(image, torch.as_tensor(matrix, device="cuda"), dims=1) + shift
    result = tensordot_pytorch(image, torch.as_tensor(matrix, device='cuda'), dims=1) + shift
    result = result.view(image.size())   # .view() is not in-place; assign the reshaped result
    return result

# 2. Chroma subsampling
Example 9: dct_8x8
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def dct_8x8(image):
    image = image - 128
    # Build the 8x8 forward-DCT cosine basis as an (8, 8, 8, 8) tensor.
    tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
    for x, y, u, v in itertools.product(range(8), repeat=4):
        tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos(
            (2 * y + 1) * v * np.pi / 16)
    alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
    scale = torch.FloatTensor(np.outer(alpha, alpha) * 0.25).cuda()
    # result = scale * torch.tensordot(image, torch.as_tensor(tensor, device="cuda"), dims=2)
    result = scale * tensordot_pytorch(image, torch.as_tensor(tensor, device="cuda"), dims=2)
    result = result.view(image.size())   # .view() is not in-place; assign the reshaped result
    return result
Example 10: ycbcr_to_rgb
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def ycbcr_to_rgb(image):
    matrix = np.array(
        [[298.082, 0, 408.583],
         [298.082, -100.291, -208.120],
         [298.082, 516.412, 0]],
        dtype=np.float32).T / 256
    shift = torch.as_tensor([-222.921, 135.576, -276.836], device="cuda")
    # result = torch.tensordot(image, torch.tensor(matrix, device="cuda"), dims=1) + shift
    result = tensordot_pytorch(image, torch.tensor(matrix, device="cuda"), dims=1) + shift
    result = result.view(image.size())   # .view() is not in-place; assign the reshaped result
    return result
Example 11: test_tensordot
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def test_tensordot():
    backend = pytorch_backend.PyTorchBackend()
    a = backend.convert_to_tensor(2 * np.ones((2, 3, 4)))
    b = backend.convert_to_tensor(np.ones((2, 3, 4)))
    actual = backend.tensordot(a, b, ((1, 2), (1, 2)))
    expected = np.array([[24.0, 24.0], [24.0, 24.0]])
    np.testing.assert_allclose(expected, actual)
Example 12: test_eigsh_lanczos_0
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def test_eigsh_lanczos_0():
    # This test should just not crash.
    dtype = torch.float64
    backend = pytorch_backend.PyTorchBackend()
    D = 4
    init = backend.randn((2, 2, 2), dtype=dtype)
    tmp = backend.randn((8, 8), dtype=dtype)
    H = tmp + backend.transpose(backend.conj(tmp), (1, 0))   # make the operator Hermitian
    H = H.reshape([2, 2, 2, 2, 2, 2])

    def mv(x, mat):
        return torch.tensordot(mat, x, ([0, 3, 5], [2, 0, 1])).permute([2, 0, 1])

    backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=D)
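The matrix-vector callback mv above contracts three of H's six size-2 axes (0, 3, 5) against all three axes of x (taken in the order 2, 0, 1); the three uncontracted axes of H remain, so the result is again a (2, 2, 2) tensor, which the final permute reorders. A shape-only check (names follow the test above):

import torch

H = torch.randn(2, 2, 2, 2, 2, 2)
x = torch.randn(2, 2, 2)

y = torch.tensordot(H, x, ([0, 3, 5], [2, 0, 1])).permute([2, 0, 1])
print(y.shape)   # torch.Size([2, 2, 2])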
Example 13: forward
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def forward(self, inputs, embed=True):
    if embed:
        # Look up token embeddings.
        return torch.nn.functional.embedding(inputs, self.w)
    else:
        # Reuse the embedding weights as an output projection (tied weights).
        return torch.tensordot(inputs, self.w.t(), 1) + self.b
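In the embed=False branch, torch.tensordot(inputs, self.w.t(), 1) contracts the hidden axis of inputs with the rows of the transposed embedding matrix, i.e. it reuses the embedding weights as an output projection. A shape-only sketch with illustrative sizes (vocabulary V, hidden size H):

import torch

V, H = 100, 16
w = torch.randn(V, H)          # embedding matrix, one row per token
b = torch.randn(V)
hidden = torch.randn(2, 7, H)  # (batch, seq_len, hidden)

logits = torch.tensordot(hidden, w.t(), 1) + b
print(logits.shape)            # torch.Size([2, 7, 100])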
Example 14: sample
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def sample(self, p, z=None):
    """Input p to be shaped [T,B,A,P] or [B,A,P], A: number of actions, P:
    number of atoms. Optional input z is the domain of atom values, shaped
    [P]. Vector epsilon of length B will apply across the batch dimension."""
    # Fall back to self.z explicitly; "z or self.z" would call bool() on a multi-element tensor.
    q = torch.tensordot(p, self.z if z is None else z, dims=1)
    return super().sample(q)
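tensordot with dims=1 here collapses the atom axis P: each action's probability vector over atoms is dotted with the atom values z, giving the expected return Q per action. A shape-only sketch (sizes are illustrative):

import torch

B, A, P = 5, 4, 51
p = torch.softmax(torch.randn(B, A, P), dim=-1)   # per-action distribution over atoms
z = torch.linspace(-10., 10., P)                  # atom values

q = torch.tensordot(p, z, dims=1)
print(q.shape)   # torch.Size([5, 4])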
Example 15: reweight
# Required import: import torch [as alias]
# Or: from torch import tensordot [as alias]
def reweight(self, msa1hot):
    # Down-weight sequences that have many near-identical neighbours in the MSA.
    seqlen = msa1hot.size(1)
    id_min = seqlen * self.msa_cutoff
    id_mtx = torch.tensordot(msa1hot, msa1hot, [[1, 2], [1, 2]])   # pairwise identity counts
    id_mask = id_mtx > id_min
    weights = 1.0 / id_mask.float().sum(-1)
    return weights
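Contracting over axes [[1, 2], [1, 2]] sums over both the sequence-position axis and the one-hot amino-acid axis, so id_mtx[i, j] counts the positions where sequences i and j carry the same residue: an N x N pairwise identity matrix. A shape-only sketch (N sequences, length L, alphabet size A are illustrative):

import torch

N, L, A = 10, 30, 21
msa1hot = torch.nn.functional.one_hot(torch.randint(A, (N, L)), A).float()

id_mtx = torch.tensordot(msa1hot, msa1hot, [[1, 2], [1, 2]])
print(id_mtx.shape)            # torch.Size([10, 10])
print(id_mtx.diagonal()[:3])   # each sequence matches itself at all L positions -> 30.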