This article collects typical usage examples of the Python method torch.svd. If you are wondering what torch.svd does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of torch, the module this method belongs to.
Below are 15 code examples of torch.svd, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
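Throughout these examples it helps to keep torch.svd's convention in mind: it returns U, S, V (V itself, not Vᵀ) such that A = U @ diag(S) @ V.t(). Note also that recent PyTorch releases deprecate torch.svd in favor of torch.linalg.svd, which returns Vh = Vᵀ instead. A minimal sketch:

import torch

A = torch.randn(5, 3, dtype=torch.float64)
U, S, V = torch.svd(A)  # U: (5, 3), S: (3,), V: (3, 3)
assert torch.allclose(A, U @ torch.diag(S) @ V.t())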
Example 1: euclidean_stiefel_case
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def euclidean_stiefel_case():
    torch.manual_seed(42)
    shape = manifold_shapes[geoopt.manifolds.EuclideanStiefel]
    ex = torch.randn(*shape, dtype=torch.float64)
    ev = torch.randn(*shape, dtype=torch.float64)
    u, _, v = torch.svd(ex)
    x = u @ v.t()  # project onto the Stiefel manifold (orthonormal columns)
    nonsym = x.t() @ ev
    v = ev - x @ (nonsym + nonsym.t()) / 2  # project ev onto the tangent space at x
    manifold = geoopt.manifolds.EuclideanStiefel()
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case
    manifold = geoopt.manifolds.EuclideanStiefelExact()
    x = geoopt.ManifoldTensor(x, manifold=manifold)
    case = UnaryCase(shape, x, ex, v, ev, manifold)
    yield case
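The SVD-based recombination u @ v.t() above is the standard way to project an arbitrary matrix onto the Stiefel manifold: it is the closest matrix with orthonormal columns. A quick standalone check of that property (shapes chosen arbitrarily):

import torch

ex = torch.randn(10, 3, dtype=torch.float64)
u, _, v = torch.svd(ex)
x = u @ v.t()  # nearest matrix with orthonormal columns
assert torch.allclose(x.t() @ x, torch.eye(3, dtype=torch.float64))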
Example 2: regularizer_orth2
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Applies regularization during training by performing the
    # orthogonalization technique described in the paper.
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies regularizer_orth2() to every layer of the model.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        w = w.permute(2, 3, 1, 0).contiguous().view(f1 * f2 * c_in, c_out)
        u, s, v = torch.svd(w)
        s_mean = s.mean()
        # pull outlier singular values back toward the mean
        s[s > 1.5 * s_mean] = s[s > 1.5 * s_mean] - 1e-4
        s[s < 0.5 * s_mean] = s[s < 0.5 * s_mean] + 1e-4
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)  # .type(dtype)
    else:
        pass
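As the docstring notes, the function is meant to be mapped over a network with Module.apply. A minimal usage sketch (the network here is an arbitrary stand-in, not from the original repo):

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 16, 3))
net.apply(regularizer_orth2)  # nudges outlier singular values of every Conv weight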
Example 3: zca_matrix
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def zca_matrix(data_tensor):
    """
    Helper function: compute ZCA whitening matrix across a dataset ~ (N, C, H, W).
    """
    # 1. flatten dataset:
    X = data_tensor.view(data_tensor.shape[0], -1)
    # 2. rescale the matrix into [-1, 1]:
    X = rescale(X, -1., 1.)
    # 3. compute covariances:
    cov = torch.t(X) @ X
    # 4. compute ZCA(X) == U @ diag(1/S) @ torch.t(V) where U, S, V = SVD(cov):
    U, S, V = torch.svd(cov)
    return U @ torch.diag(torch.reciprocal(S)) @ torch.t(V)
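To whiten data with the returned matrix, flatten it the same way and multiply on the right. The rescale helper above comes from the original repo (presumably mapping values into [-1, 1]), so this usage sketch only exercises zca_matrix as given:

import torch

data = torch.rand(512, 3, 8, 8)   # toy dataset ~ (N, C, H, W), with N > C*H*W
W = zca_matrix(data)              # (C*H*W, C*H*W) whitening matrix
X_white = data.view(512, -1) @ W  # applies the transform to each flattened sample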
Example 4: estimate_pose
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def estimate_pose(self, pt0, pt1):
    # weighted Kabsch/Procrustes: recover the rotation and translation
    # between two keypoint sets, weighted by per-keypoint confidences
    pconf2 = self.pconf.view(1, self.num_key, 1)
    cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    diag_mat = torch.diag(self.pconf).unsqueeze(0)
    x = (pt0 - cent0).transpose(2, 1).contiguous()
    y = pt1 - cent1
    pred_t = cent1 - cent0
    cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)
    u, _, v = torch.svd(cov)
    u = u.transpose(1, 0).contiguous()
    # flip the last column of u if needed so the result is a proper rotation (det = +1)
    d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
    u = u.transpose(1, 0).contiguous().unsqueeze(0)
    ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
    v = v.transpose(1, 0).contiguous().unsqueeze(0)
    pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
    return pred_r, pred_t[:, 0, :].view(1, 3)
Example 5: __init__
# Required imports: import torch [as alias]; also numpy as np and torch.nn as nn
# Or: from torch import svd [as alias]
def __init__(self, dims_in, correction_interval=256, clamp=5.):
    super().__init__()
    self.width = dims_in[0][0]
    self.clamp = clamp
    self.correction_interval = correction_interval
    self.back_counter = np.random.randint(0, correction_interval) // 2
    self.weights = torch.randn(self.width, self.width)
    self.weights = self.weights + self.weights.t()  # symmetrize
    self.weights, S, V = torch.svd(self.weights)    # keep U: an orthogonal init
    self.weights = nn.Parameter(self.weights)
    self.bias = nn.Parameter(0.05 * torch.randn(self.width))
    self.scaling = nn.Parameter(0.02 * torch.randn(self.width))
    self.register_backward_hook(correct_weights)
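The initialization trick here: torch.svd of the symmetrized random matrix yields an orthogonal U, which becomes the initial weight, so the coupling starts as a pure rotation. The property in isolation:

import torch

w = torch.randn(8, 8)
w = w + w.t()           # symmetrize
w, S, V = torch.svd(w)  # keep U, which has orthonormal columns
assert torch.allclose(w @ w.t(), torch.eye(8), atol=1e-5)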
Example 6: spectral_restricted_isometry_property_regularization
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def spectral_restricted_isometry_property_regularization(weights, config):
    """Requires that every set of columns of the weights, with cardinality no
    larger than k, shall behave like an orthogonal system. Also called SRIP.

    References:
        * Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?
          Bansal et al.
          NeurIPS 2018

    :param weights: Learned parameters of shape (n_classes, n_features).
    :return: A float scalar loss.
    """
    wTw = torch.mm(weights.t(), weights)
    x = wTw - torch.eye(wTw.shape[0]).to(weights.device)
    _, s, _ = torch.svd(x)
    loss = s[0]  # largest singular value, i.e. the spectral norm of WᵀW - I
    return config["factor"] * loss
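Since WᵀW − I is passed to torch.svd, the returned s[0] is exactly the spectral norm of the orthogonality residual. A usage sketch with a made-up config dict:

import torch.nn as nn

layer = nn.Linear(64, 10)  # weight has shape (n_classes, n_features) = (10, 64)
srip_loss = spectral_restricted_isometry_property_regularization(
    layer.weight, {"factor": 0.1})
# add srip_loss to the task loss before calling backward()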
Example 7: init_projection_matrix
# Required imports: import torch [as alias]; also math and pytracking's TensorList
# Or: from torch import svd [as alias]
def init_projection_matrix(self, x):
    # Set if using projection matrix
    self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)
    if self.params.use_projection_matrix:
        self.compressed_dim = self.fparams.attribute('compressed_dim', None)
        proj_init_method = self.params.get('proj_init_method', 'pca')
        if proj_init_method == 'pca':
            # PCA: top left singular vectors of the feature covariance
            x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])
            x_mat -= x_mat.mean(dim=1, keepdim=True)
            cov_x = x_mat @ x_mat.t()
            self.projection_matrix = TensorList(
                [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone()
                 for C, cdim in zip(cov_x, self.compressed_dim)])
        elif proj_init_method == 'randn':
            # random Gaussian projection, scaled by 1/sqrt(n_channels)
            self.projection_matrix = TensorList(
                [None if cdim is None else ex.new_zeros(cdim, ex.shape[1], 1, 1).normal_(0, 1 / math.sqrt(ex.shape[1]))
                 for ex, cdim in zip(x, self.compressed_dim)])
    else:
        self.compressed_dim = x.size(1)
        self.projection_matrix = TensorList([None] * len(x))
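Stripped of the tracker-specific TensorList plumbing, the 'pca' branch reduces to: flatten the features to (channels, samples), center them, form the covariance, and keep the top compressed_dim left singular vectors. A standalone sketch with made-up sizes:

import torch

feat = torch.randn(64, 400)                   # (channels, samples)
feat = feat - feat.mean(dim=1, keepdim=True)  # center
cov = feat @ feat.t()                         # (64, 64) covariance
basis = torch.svd(cov)[0][:, :16]             # top-16 principal directions
compressed = basis.t() @ feat                 # (16, 400) projected features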
Example 8: whiten
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def whiten(cF):
    cFSize = cF.size()
    c_mean = torch.mean(cF, 1)  # c x (h x w)
    c_mean = c_mean.unsqueeze(1).expand_as(cF)
    cF = cF - c_mean
    contentConv = torch.mm(cF, cF.t()).div(cFSize[1] - 1) + torch.eye(cFSize[0]).double()
    c_u, c_e, c_v = torch.svd(contentConv, some=False)
    # keep only singular values above a numerical threshold
    k_c = cFSize[0]
    for i in range(cFSize[0]):
        if c_e[i] < 0.00001:
            k_c = i
            break
    c_d = (c_e[0:k_c]).pow(-0.5)
    step1 = torch.mm(c_v[:, 0:k_c], torch.diag(c_d))
    step2 = torch.mm(step1, (c_v[:, 0:k_c].t()))
    whiten_cF = torch.mm(step2, cF)
    return whiten_cF
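A quick sanity check: after whitening, the feature covariance should be roughly the identity (up to sampling noise and the +I regularization added to contentConv above). Double-precision input matches the .double() cast inside whiten:

import torch

cF = 10 * torch.randn(32, 500, dtype=torch.float64)  # (channels, h*w) features
wF = whiten(cF)
cov = wF @ wF.t() / (wF.size(1) - 1)
print(torch.dist(cov, torch.eye(32, dtype=torch.float64)))  # far smaller than for raw cF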
Example 9: _normalize_one
# Required imports: import torch [as alias]; also numpy as np
# Or: from torch import svd [as alias]
def _normalize_one(self, mat):
    # U, S, V = torch.svd(A) returns the singular value
    # decomposition of a real matrix A of size (n x m) such that A = U S V'.
    # Irrespective of the original strides, the returned matrix U will
    # be transposed, i.e. with strides (1, n) instead of (n, 1).
    # pytorch has a native SVD function but not determinant...
    # U, _, V = mat.squeeze().svd()
    # S = torch.eye(self.dim)
    # if U.is_cuda:
    #     S = S.cuda()
    # S[self.dim - 1, self.dim - 1] = float(np.linalg.det(U.cpu().numpy()) *
    #                                       np.linalg.det(V.cpu().numpy()))
    # mat_normalized = U.mm(S).mm(V.t_())
    # pytorch SVD seems to be inaccurate, so just move to numpy immediately
    mat_cpu = mat.detach().cpu().numpy().squeeze()
    U, _, V = np.linalg.svd(mat_cpu, full_matrices=False)
    S = np.eye(self.dim)
    # correct the sign so the result has det = +1 (a proper rotation)
    S[self.dim - 1, self.dim - 1] = np.linalg.det(U) * np.linalg.det(V)
    mat_normalized = mat.__class__(U.dot(S).dot(V))
    mat.copy_(mat_normalized)
    return mat
Example 10: rigid_transform_3d_pytorch
# Required imports: import torch [as alias]; also numpy as np
# Or: from torch import svd [as alias]
def rigid_transform_3d_pytorch(p1, p2):
    # Kabsch algorithm: least-squares rigid transform from p1 to p2
    center_p1 = torch.mean(p1, dim=0, keepdim=True)
    center_p2 = torch.mean(p2, dim=0, keepdim=True)
    pp1 = p1 - center_p1
    pp2 = p2 - center_p2
    h = torch.mm(pp1.t(), pp2)
    u, _, v = torch.svd(h)
    # NOTE: torch.svd returns V itself (h = u @ diag(s) @ v.t()), unlike
    # np.linalg.svd, which returns Vᵀ - so the rotation is V @ Uᵀ here
    # (the original snippet used v.t(), mirroring the numpy twin below,
    # which computes the transpose of the intended rotation)
    r = torch.mm(v, u.t())
    # reflection: flip the last singular direction to enforce det(r) = +1
    if np.linalg.det(r.cpu().numpy()) < 0:
        v[:, 2] *= -1
        r = torch.mm(v, u.t())
    t = torch.mm(-r, center_p1.t()) + center_p2.t()
    return r, t
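A round-trip check: rotate and translate a point cloud, then recover the transform. With torch.svd's V-not-Vᵀ convention handled as above, the recovered rotation matches:

import math
import torch

p1 = torch.randn(100, 3, dtype=torch.float64)
c, s = math.cos(0.7), math.sin(0.7)
R_true = torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]], dtype=torch.float64)
t_true = torch.tensor([[1., 2., 3.]], dtype=torch.float64)
p2 = p1 @ R_true.t() + t_true  # apply the rigid transform
r, t = rigid_transform_3d_pytorch(p1, p2)
assert torch.allclose(r, R_true, atol=1e-6)
assert torch.allclose(t.t(), t_true, atol=1e-6)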
Example 11: rigid_transform_3d_numpy
# Required import: import numpy as np
def rigid_transform_3d_numpy(p1, p2):
    # numpy twin of the previous example; np.linalg.svd returns Vᵀ directly
    center_p1 = np.mean(p1, axis=0, keepdims=True)
    center_p2 = np.mean(p2, axis=0, keepdims=True)
    pp1 = p1 - center_p1
    pp2 = p2 - center_p2
    h = np.matmul(pp1.T, pp2)
    u, _, v = np.linalg.svd(h)
    r = np.matmul(v.T, u.T)
    # reflection: enforce det(r) = +1
    if np.linalg.det(r) < 0:
        v[2, :] *= -1
        r = np.matmul(v.T, u.T)
    t = np.matmul(-r, center_p1.T) + center_p2.T
    return r, t
Example 12: finalize
# Required imports: import torch [as alias]; also math
# Or: from torch import svd [as alias]
def finalize(self):
    """
    Finalize training with LU factorization or pseudo-inverse.
    """
    # Average
    self.R = self.R / self.n_samples
    # SVD
    (U, S, V) = torch.svd(self.R)
    # Compute new singular values: s_i / (s_i + aperture^-2)
    Snew = torch.mm(torch.diag(S), torch.inverse(torch.diag(S) + math.pow(self.aperture, -2) * torch.eye(self.input_dim, dtype=self.dtype)))
    # Apply the new singular values to get the conceptor C = U Snew Uᵀ
    self.C.data = torch.mm(torch.mm(U, Snew), U.t()).data
    # Not in training mode anymore
    self.train(False)
# end finalize
Example 13: similarity
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def similarity(C1, C2):
    """
    Similarity between two conceptors.
    :param C1: first conceptor
    :param C2: second conceptor
    :return: the generalized squared cosine between C1 and C2
    """
    # Compute singular values
    Ua, Sa, _ = torch.svd(C1.get_C())
    Ub, Sb, _ = torch.svd(C2.get_C())
    # Measure
    return generalized_squared_cosine(Sa, Ua, Sb, Ub)
# end similarity
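generalized_squared_cosine is a helper from the same library (EchoTorch). For reference, here is a hedged sketch of the standard definition from the conceptor literature (Jaeger, "Controlling Recurrent Neural Networks by Conceptors", 2014); it may differ in detail from the library's exact implementation:

import torch

def generalized_squared_cosine(Sa, Ua, Sb, Ub):
    # || diag(Sa)^(1/2) Uaᵀ Ub diag(Sb)^(1/2) ||_F^2 / (||Sa|| * ||Sb||)
    num = torch.norm(torch.diag(Sa.sqrt()) @ Ua.t() @ Ub @ torch.diag(Sb.sqrt())) ** 2
    return num / (torch.norm(Sa) * torch.norm(Sb))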
Example 14: sim
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def sim(self, cb, measure='gsc'):
    """
    Similarity with another conceptor.
    :param cb: the other conceptor
    :param measure: similarity measure ('gsc' for generalized squared cosine)
    :return: the similarity score
    """
    # Compute singular values
    Ua, Sa, _ = torch.svd(self.C)
    Ub, Sb, _ = torch.svd(cb.get_C())
    # Measure
    if measure == 'gsc':
        return generalized_squared_cosine(Sa, Ua, Sb, Ub)
    # end if
# end sim
Example 15: compute_A_SV
# Required import: import torch [as alias]
# Or: from torch import svd [as alias]
def compute_A_SV(conceptors):
    """
    Get the singular values of A, the OR of all conceptors.
    :param conceptors: a list of conceptors
    :return: the singular values of A
    """
    # A (OR of all conceptors)
    A = ConceptorPool.compute_A(conceptors)
    # Compute SVD
    _, S, _ = torch.svd(A.get_C())
    return S
# end compute_A_SV