This article collects typical usage examples of the torch.mv method in Python. If you have been wondering what exactly torch.mv does, how to call it, and how it is used in practice, the curated examples here should help. You can also explore further usage examples from the torch module the method belongs to.
Below are 15 code examples of torch.mv, ordered by popularity by default.
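As a warm-up before the examples: torch.mv(mat, vec) multiplies a 2-D matrix by a 1-D vector and returns a 1-D vector; it does not broadcast. A minimal illustrative snippet (not taken from any of the projects below):

import torch

A = torch.tensor([[1., 2.],
                  [3., 4.]])   # shape (2, 2)
x = torch.tensor([1., 1.])     # shape (2,)
y = torch.mv(A, x)             # same as A @ x
print(y)                       # tensor([3., 7.])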
Example 1: compute_one_iter
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
# Also needs: import torch.nn.functional as F
def compute_one_iter(self):
    if not self.initialized:
        raise ValueError('Layer needs to be initialized first.')
    domain, codomain = self.compute_domain_codomain()
    if self.kernel_size == (1, 1):
        u = self.u.detach()
        v = self.v.detach()
        weight = self.weight.detach().view(self.out_channels, self.in_channels)
        u = normalize_u(torch.mv(weight, v), codomain)
        v = normalize_v(torch.mv(weight.t(), u), domain)
        return torch.dot(u, torch.mv(weight, v))
    else:
        u = self.u.detach()
        v = self.v.detach()
        weight = self.weight.detach()
        c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
        u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
        out_shape = u_s.shape
        u = normalize_u(u_s.view(-1), codomain)
        v_s = F.conv_transpose2d(
            u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0
        )
        v = normalize_v(v_s.view(-1), domain)
        weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
        return torch.dot(u.view(-1), weight_v.view(-1))
Example 2: unify_sentence
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def unify_sentence(self, sentence_feature, one_sentence_embedding):
    """
    Unify Sentence By Token Importance
    """
    sent_len = one_sentence_embedding.size()[0]
    var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
    for token_index in range(sent_len):
        token_feature = sentence_feature[:, token_index, :]
        sim_map = self.cosine_similarity_torch(token_feature)
        var_token[token_index] = torch.var(sim_map.diagonal(-1))

    var_token = var_token / torch.sum(var_token)
    sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)
    return sentence_embedding
Example 3: C3
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def C3():
    n = 10
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None
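The three strided writes into A.view(-1) fill the main diagonal with -2 and the first sub- and super-diagonals with 1, i.e. the tridiagonal 1-D Laplacian stencil, so diffeq is the linear ODE y' = Ay. A quick illustrative check of the fill pattern (not part of the original test suite):

n = 4
A = torch.zeros(n, n)
A.view(-1)[::n + 1] = -2     # main diagonal
A.view(-1)[n::n + 1] = 1     # sub-diagonal
A.view(-1)[1::n + 1] = 1     # super-diagonal
expected = (torch.diag(torch.full((n,), -2.))
            + torch.diag(torch.ones(n - 1), 1)
            + torch.diag(torch.ones(n - 1), -1))
assert torch.equal(A, expected)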
Example 4: C4
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def C4():
    n = 51
    A = torch.zeros(n, n)
    A.view(-1)[::n + 1] = -2
    A.view(-1)[n::n + 1] = 1
    A.view(-1)[1::n + 1] = 1

    def diffeq(t, y):
        return torch.mv(A, y)

    def init():
        y0 = torch.zeros(n)
        y0[0] = 1
        return torch.tensor(0.), y0

    return diffeq, init, None
Example 5: forward
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def forward(self, *args):
    r"""Computes the output of the ``module`` and applies spectral normalization to the
    ``name`` attribute of the ``module``.

    Returns:
        The output of the ``module``.
    """
    height = self.w_bar.data.shape[0]
    for _ in range(self.power_iterations):
        self.v.data = self._l2normalize(
            torch.mv(torch.t(self.w_bar.view(height, -1)), self.u)
        )
        self.u.data = self._l2normalize(
            torch.mv(self.w_bar.view(height, -1), self.v)
        )
    sigma = self.u.dot(self.w_bar.view(height, -1).mv(self.v))
    setattr(self.module, self.name, self.w_bar / sigma.expand_as(self.w_bar))
    return self.module.forward(*args)
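The loop above is power iteration: alternating torch.mv with the transposed and untransposed weight converges u and v to the top singular vectors, so u·(Wv) estimates the spectral norm σ. A standalone sketch (variable names are mine) checking the estimate against torch.linalg.svdvals, which requires a recent PyTorch:

import torch

torch.manual_seed(0)
W = torch.randn(8, 5)
u = torch.nn.functional.normalize(torch.randn(8), dim=0)
for _ in range(50):
    v = torch.nn.functional.normalize(torch.mv(W.t(), u), dim=0)
    u = torch.nn.functional.normalize(torch.mv(W, v), dim=0)
sigma = torch.dot(u, torch.mv(W, v))
print(sigma.item(), torch.linalg.svdvals(W)[0].item())  # should agree closely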
Example 6: forward
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
# Also needs: import torch.nn.functional as F
def forward(self, z):
    # Create uhat such that it is parallel to w
    uw = torch.dot(self.u, self.w)
    muw = -1 + F.softplus(uw)
    uhat = self.u + (muw - uw) * torch.transpose(self.w, 0, -1) / torch.sum(self.w ** 2)

    # Equation 21 - Transform z
    zwb = torch.mv(z, self.w) + self.b
    f_z = z + (uhat.view(1, -1) * torch.tanh(zwb).view(-1, 1))

    # Compute the Jacobian using the fact that
    # d tanh(x)/dx = 1 - tanh(x)**2
    psi = (1 - torch.tanh(zwb) ** 2).view(-1, 1) * self.w.view(1, -1)
    psi_u = torch.mv(psi, uhat)

    # Return the transformed output along
    # with the log determinant of J
    logdet_jacobian = torch.log(torch.abs(1 + psi_u) + 1e-8)

    return f_z, logdet_jacobian
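Because the comments derive the Jacobian in closed form (det(I + u ψᵀ) = 1 + ψᵀu by the matrix determinant lemma), the log-determinant of this planar flow can be sanity-checked against autograd. This sketch uses hand-built standalone tensors rather than the module above:

import torch
import torch.nn.functional as F

d = 3
w, u_raw, b = torch.randn(d), torch.randn(d), torch.randn(())
# same reparameterization as above, which keeps 1 + psi·uhat positive
uw = torch.dot(u_raw, w)
uhat = u_raw + (-1 + F.softplus(uw) - uw) * w / torch.sum(w ** 2)

def flow(z):
    return z + uhat * torch.tanh(torch.dot(z, w) + b)

z = torch.randn(d)
J = torch.autograd.functional.jacobian(flow, z)
psi = (1 - torch.tanh(torch.dot(z, w) + b) ** 2) * w
print(torch.logdet(J).item(),
      torch.log(torch.abs(1 + torch.dot(psi, uhat))).item())  # should match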
Example 7: _oscar_prox_jacobian
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
# Also needs: import numpy as np
def _oscar_prox_jacobian(y_star, dout=None):
    y_star = y_star.numpy()
    dim = y_star.shape[0]
    J = torch.zeros(dim, dim)

    _, inv, counts = np.unique(np.abs(y_star),
                               return_inverse=True,
                               return_counts=True)

    for i in range(dim):
        for j in range(dim):
            if inv[i] == inv[j] and y_star[i] != 0:
                J[i, j] = (np.sign(y_star[i]) * np.sign(y_star[j])
                           / counts[inv[i]])

    if dout is not None:
        return torch.mv(J, dout)
    else:
        return J
Example 8: _fused_prox_jacobian
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def _fused_prox_jacobian(y_hat, dout=None):
    """reference naive implementation: construct the jacobian"""
    dim = y_hat.shape[0]
    groups = torch.zeros(dim)
    J = torch.zeros(dim, dim)

    current_group = 0
    for i in range(1, dim):
        if y_hat[i] == y_hat[i - 1]:
            groups[i] = groups[i - 1]
        else:
            current_group += 1
            groups[i] = current_group

    for i in range(dim):
        for j in range(dim):
            if groups[i] == groups[j]:
                n_fused = (groups == groups[i]).sum()
                J[i, j] = 1 / n_fused.to(y_hat.dtype)

    if dout is not None:
        return torch.mv(J, dout)
    else:
        return J
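For intuition, each entry J[i, j] is 1/n within a run of equal (fused) values and 0 across runs, so applying J via torch.mv averages an incoming gradient over each group. An illustrative call to the function above with a hand-picked input:

y_hat = torch.tensor([0.5, 0.5, 1.0, 2.0, 2.0, 2.0])
J = _fused_prox_jacobian(y_hat)
# group {0, 1} gets entries 1/2, group {2} gets 1, group {3, 4, 5} gets 1/3
print(J)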
Example 9: backward
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def backward(self, grad_output):
    input, weight, bias = self.saved_tensors
    grad_input = grad_weight = grad_bias = None

    if self.needs_input_grad[0]:
        grad_output = grad_output.squeeze()
        grad_input = torch.mm(grad_output, weight)
    if self.needs_input_grad[1]:
        grad_weight = torch.mm(grad_output.t(), input)
    if bias is not None and self.needs_input_grad[2]:
        grad_bias = torch.mv(grad_output.t(), self.add_buffer)

    if bias is not None:
        return grad_input, grad_weight, grad_bias
    else:
        return grad_input, grad_weight
Example 10: dot_nd
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def dot_nd(query, candidates):
    """
    Perform a dot product between a query and n-dimensional candidates.

    :param query: (Tensor) A vector to query, whose size is
        (query_dim,)
    :param candidates: (Tensor) An n-dimensional tensor to be multiplied
        by query, whose size is (d0, d1, ..., dn, query_dim)
    :returns: The result of the dot product, whose size is
        (d0, d1, ..., dn)
    """
    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    return output
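The flatten → torch.mv → reshape pattern here is a handy way to batch a matrix-vector product over arbitrary leading dimensions. An illustrative shape check against the function above (sizes are my own choice):

query = torch.randn(16)
candidates = torch.randn(3, 4, 5, 16)
out = dot_nd(query, candidates)
assert out.shape == (3, 4, 5)
# equivalent to reducing the last dimension directly
assert torch.allclose(out, (candidates * query).sum(-1), atol=1e-5)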
Example 11: dot_nd
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def dot_nd(query, candidates):
    """
    Perform a dot product between a query and n-dimensional candidates.

    Args:
        query (Variable): A vector to query, whose size is
            (query_dim,)
        candidates (Variable): An n-dimensional tensor to be multiplied
            by query, whose size is (d0, d1, ..., dn, query_dim)

    Returns:
        output: The result of the dot product, whose size is
            (d0, d1, ..., dn)
    """
    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    return output
Example 12: cos_nd
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def cos_nd(query, candidates):
    """
    Perform cosine similarity between a query and n-dimensional candidates.

    Args:
        query (Variable): A vector to query, whose size is
            (query_dim,)
        candidates (Variable): An n-dimensional tensor to be multiplied
            by query, whose size is (d0, d1, ..., dn, query_dim)

    Returns:
        output: The result of the cosine operator, whose size is
            (d0, d1, ..., dn)
    """
    cands_size = candidates.size()
    cands_flat = candidates.view(-1, cands_size[-1])
    output_flat = torch.mv(cands_flat, query)
    output = output_flat.view(*cands_size[:-1])
    lengths = (torch.sum(candidates ** 2, dim=-1) + 1e-10) ** 0.5
    lengths = lengths.contiguous().view(output.size())
    output = output / lengths / ((torch.sum(query ** 2) + 1e-10) ** 0.5)
    return output
Example 13: _update_u_v
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def _update_u_v(self):
    u = getattr(self.module, self.name + "_u")
    v = getattr(self.module, self.name + "_v")
    w = getattr(self.module, self.name + "_bar")

    height = w.data.shape[0]
    for _ in range(self.power_iterations):
        v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
        u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))

    # sigma = torch.dot(u.data, torch.mv(w.view(height, -1).data, v.data))
    sigma = u.dot(w.view(height, -1).mv(v))
    setattr(self.module, self.name, w / sigma.expand_as(w))
Example 14: sn_weight
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def sn_weight(weight, u, height, n_power_iterations):
    weight.requires_grad_(False)
    for _ in range(n_power_iterations):
        v = l2normalize(torch.mv(weight.view(height, -1).t(), u))
        u = l2normalize(torch.mv(weight.view(height, -1), v))
    weight.requires_grad_(True)

    sigma = u.dot(weight.view(height, -1).mv(v))
    return torch.div(weight, sigma), u
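Examples 5, 13, and 14 all implement the same power-iteration estimate of the spectral norm. To call sn_weight standalone you need an l2normalize helper; the body below is an assumption consistent with how it is used above, not the original project's code:

def l2normalize(v, eps=1e-12):
    # assumed helper: rescale a vector to unit L2 norm
    return v / (v.norm() + eps)

weight = torch.nn.Parameter(torch.randn(10, 20))
u = l2normalize(torch.randn(10))
w_sn, u = sn_weight(weight, u, height=10, n_power_iterations=5)
# the largest singular value of w_sn approaches 1 as iterations increase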
Example 15: unify_token
# Required module: import torch [as alias]
# Or: from torch import mv [as alias]
def unify_token(self, token_feature):
    """
    Unify Token Representation
    """
    window_size = self.context_window_size

    alpha_alignment = torch.zeros(token_feature.size()[0], device=token_feature.device)
    alpha_novelty = torch.zeros(token_feature.size()[0], device=token_feature.device)

    for k in range(token_feature.size()[0]):
        left_window = token_feature[k - window_size:k, :]
        right_window = token_feature[k + 1:k + window_size + 1, :]
        window_matrix = torch.cat([left_window, right_window, token_feature[k, :][None, :]])

        Q, R = torch.qr(window_matrix.T)

        r = R[:, -1]
        alpha_alignment[k] = torch.mean(self.norm_vector(R[:-1, :-1], dim=0), dim=1).matmul(R[:-1, -1]) / torch.norm(r[:-1])
        alpha_alignment[k] = 1 / (alpha_alignment[k] * window_matrix.size()[0] * 2)
        alpha_novelty[k] = torch.abs(r[-1]) / torch.norm(r)

    # Sum Norm
    alpha_alignment = alpha_alignment / torch.sum(alpha_alignment)  # Normalization Choice
    alpha_novelty = alpha_novelty / torch.sum(alpha_novelty)

    alpha = alpha_novelty + alpha_alignment
    alpha = alpha / torch.sum(alpha)  # Normalize

    out_embedding = torch.mv(token_feature.t(), alpha)
    return out_embedding
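unify_token relies on a norm_vector helper that is not shown in this snippet; a plausible stand-in (hypothetical, not from the source project) that normalizes slices of R to unit length:

def norm_vector(self, vec, dim=0):
    # hypothetical helper: scale so each slice along `dim` has unit L2 norm
    return vec / torch.norm(vec, dim=dim, keepdim=True)

# token_feature is presumably (num_layers, hidden_dim) for one token, as in
# Example 2's sentence_feature[:, token_index, :]; the result is (hidden_dim,).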