This article collects typical usage examples of the torch.inverse method in Python. If you are wondering what torch.inverse does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from its containing module, torch.
Fifteen code examples of torch.inverse are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
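Before the collected examples, here is a minimal sketch of what torch.inverse computes (my own illustration, not one of the examples below):

import torch

# Minimal sketch (not from the examples below): torch.inverse returns the
# inverse of a square matrix and also accepts batches of square matrices.
a = torch.randn(3, 3)                # a random matrix is almost surely invertible
a_inv = torch.inverse(a)
print(torch.allclose(a @ a_inv, torch.eye(3), atol=1e-4))   # True

batch = torch.randn(4, 3, 3)
batch_inv = torch.inverse(batch)     # each 3x3 matrix is inverted independently
print(batch_inv.shape)               # torch.Size([4, 3, 3])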
Example 1: test_inverse
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def test_inverse(self):
    features = 100
    batch_size = 50
    for num_transforms in [1, 2, 11, 12]:
        with self.subTest(num_transforms=num_transforms):
            transform = orthogonal.HouseholderSequence(
                features=features, num_transforms=num_transforms)
            matrix = transform.matrix()
            inputs = torch.randn(batch_size, features)
            outputs, logabsdet = transform.inverse(inputs)
            self.assert_tensor_is_good(outputs, [batch_size, features])
            self.assert_tensor_is_good(logabsdet, [batch_size])
            self.eps = 1e-5
            self.assertEqual(outputs, inputs @ matrix)
            self.assertEqual(logabsdet, utils.logabsdet(matrix) * torch.ones(batch_size))
Example 2: setUp
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def setUp(self):
    features = 5
    batch_size = 10
    weight = torch.randn(features, features)
    inverse = torch.randn(features, features)
    logabsdet = torch.randn(1)
    self.transform = Linear(features)
    self.transform.bias.data = torch.randn(features)  # Just so bias isn't zero.
    self.inputs = torch.randn(batch_size, features)
    self.outputs_fwd = self.inputs @ weight.t() + self.transform.bias
    self.outputs_inv = (self.inputs - self.transform.bias) @ inverse.t()
    self.logabsdet_fwd = logabsdet * torch.ones(batch_size)
    self.logabsdet_inv = (-logabsdet) * torch.ones(batch_size)
    # Mocks for abstract methods.
    self.transform.forward_no_cache = MagicMock(
        return_value=(self.outputs_fwd, self.logabsdet_fwd))
    self.transform.inverse_no_cache = MagicMock(
        return_value=(self.outputs_inv, self.logabsdet_inv))
    self.transform.weight = MagicMock(return_value=weight)
    self.transform.weight_inverse = MagicMock(return_value=inverse)
    self.transform.logabsdet = MagicMock(return_value=logabsdet)
Example 3: test_inverse_cache_is_used
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def test_inverse_cache_is_used(self):
    self.transform.eval()
    self.transform.use_cache()
    self.transform.inverse(self.inputs)
    self.assertTrue(self.transform.weight_inverse.called)
    self.assertTrue(self.transform.logabsdet.called)
    self.transform.weight_inverse.reset_mock()
    self.transform.logabsdet.reset_mock()
    outputs, logabsdet = self.transform.inverse(self.inputs)
    # Cached values should be used.
    self.assertFalse(self.transform.weight_inverse.called)
    self.assertFalse(self.transform.logabsdet.called)
    self.assertEqual(outputs, self.outputs_inv)
    self.assertEqual(logabsdet, self.logabsdet_inv)
Example 4: _vertex_decimation
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def _vertex_decimation(self, L):
    max_eigenvec = self._power_iteration(L)
    v_plus, v_minus = (max_eigenvec >= 0).squeeze(), (max_eigenvec < 0).squeeze()

    # If the matrix is diagonal, swapping v_minus for v_plus avoids errors
    # and does not change the matrix.
    if torch.sum(v_plus) == 0.:  # The matrix is diagonal; it cannot be reduced further.
        if torch.sum(v_minus) == 0.:
            assert v_minus.shape[0] == L.shape[0], (v_minus.shape, L.shape)
            # v_minus is not guaranteed to be all ones, hence this extra branch.
            return torch.ones(v_minus.shape), L
        else:
            return v_minus, L

    L_plus_plus = L[v_plus][:, v_plus]
    L_plus_minus = L[v_plus][:, v_minus]
    L_minus_minus = L[v_minus][:, v_minus]
    L_minus_plus = L[v_minus][:, v_plus]
    L_new = L_plus_plus - torch.mm(torch.mm(L_plus_minus, torch.inverse(L_minus_minus)), L_minus_plus)
    return v_plus, L_new
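The update in example 4 is a Kron reduction: torch.inverse is used to form the Schur complement of the L_minus_minus block. A standalone sketch (my own names and data, not from the repository) verifying the Schur-complement identity on a symmetric positive definite matrix:

import torch

# Sketch (my own): the Schur complement of the dropped block equals the
# inverse of the kept-by-kept block of L^{-1}.
n = 6
A = torch.randn(n, n)
L = A @ A.t() + n * torch.eye(n)     # symmetric positive definite, hence invertible
keep = torch.tensor([True, True, True, False, False, False])
drop = ~keep
schur = L[keep][:, keep] - L[keep][:, drop] @ torch.inverse(L[drop][:, drop]) @ L[drop][:, keep]
print(torch.allclose(schur, torch.inverse(torch.inverse(L)[keep][:, keep]), atol=1e-4))   # True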
Example 5: forward
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def forward(self, inputs, cond_inputs=None, mode='direct'):
    if str(self.L_mask.device) != str(self.L.device):
        self.L_mask = self.L_mask.to(self.L.device)
        self.U_mask = self.U_mask.to(self.L.device)
        self.I = self.I.to(self.L.device)
        self.P = self.P.to(self.L.device)
        self.sign_S = self.sign_S.to(self.L.device)

    L = self.L * self.L_mask + self.I
    U = self.U * self.U_mask + torch.diag(self.sign_S * torch.exp(self.log_S))
    W = self.P @ L @ U

    if mode == 'direct':
        return inputs @ W, self.log_S.sum().unsqueeze(0).unsqueeze(0).repeat(
            inputs.size(0), 1)
    else:
        return inputs @ torch.inverse(W), -self.log_S.sum().unsqueeze(0).unsqueeze(0).repeat(
            inputs.size(0), 1)
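In example 5, 'direct' mode multiplies by W = P @ L @ U and the other branch multiplies by torch.inverse(W), so the two modes undo each other; the log-determinant reduces to log_S.sum() because P is a permutation and L, U are triangular with known diagonals. A standalone roundtrip sketch (my own W, not the module above):

import torch

# Roundtrip sketch (my own W): 'direct' followed by 'inverse' is the identity.
d = 4
W = torch.randn(d, d) + d * torch.eye(d)   # roughly diagonally dominant, well conditioned
x = torch.randn(8, d)
y = x @ W                                   # what mode == 'direct' computes
x_back = y @ torch.inverse(W)               # what the other branch computes
print(torch.allclose(x, x_back, atol=1e-4))   # True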
Example 6: compute_L_inverse
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def compute_L_inverse(self, X, Y):
    N = X.size()[0]  # num of points (along dim 0)
    # Construct matrix K.
    Xmat = X.expand(N, N)
    Ymat = Y.expand(N, N)
    P_dist_squared = torch.pow(Xmat - Xmat.transpose(0, 1), 2) + torch.pow(Ymat - Ymat.transpose(0, 1), 2)
    P_dist_squared[P_dist_squared == 0] = 1  # make diagonal 1 to avoid NaN in log computation
    K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
    if self.reg_factor != 0:
        K += torch.eye(K.size(0), K.size(1)) * self.reg_factor
    # Construct matrix L.
    O = torch.FloatTensor(N, 1).fill_(1)
    Z = torch.FloatTensor(3, 3).fill_(0)
    P = torch.cat((O, X, Y), 1)
    L = torch.cat((torch.cat((K, P), 1), torch.cat((P.transpose(0, 1), Z), 1)), 0)
    Li = torch.inverse(L)
    if self.use_cuda:
        Li = Li.cuda()
    return Li
Example 7: compute_L_inverse
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def compute_L_inverse(self, X, Y):
    N = X.size()[0]  # num of points (along dim 0)
    # Construct matrix K.
    Xmat = X.expand(N, N)
    Ymat = Y.expand(N, N)
    P_dist_squared = torch.pow(Xmat - Xmat.transpose(0, 1), 2) + torch.pow(Ymat - Ymat.transpose(0, 1), 2)
    P_dist_squared[P_dist_squared == 0] = 1  # make diagonal 1 to avoid NaN in log computation
    K = torch.mul(P_dist_squared, torch.log(P_dist_squared))
    # Construct matrix L.
    O = torch.FloatTensor(N, 1).fill_(1)
    Z = torch.FloatTensor(3, 3).fill_(0)
    P = torch.cat((O, X, Y), 1)
    L = torch.cat((torch.cat((K, P), 1), torch.cat((P.transpose(0, 1), Z), 1)), 0)
    Li = torch.inverse(L)
    if self.use_cuda:
        Li = Li.cuda()
    return Li
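Examples 6 and 7 build the standard thin-plate-spline system matrix L = [[K, P], [P^T, 0]] with kernel entries r^2 log r^2, and precompute torch.inverse(L) once so TPS coefficients can later be obtained by a single matrix multiply. A toy check (my own, mirroring the construction above in float64) that L is invertible for control points in general position:

import torch

# Toy check (my own, mirroring compute_L_inverse): the TPS system matrix is
# invertible for random control points in general position.
N = 5
X = torch.rand(N, 1, dtype=torch.float64)
Y = torch.rand(N, 1, dtype=torch.float64)
d2 = (X.expand(N, N) - X.expand(N, N).t()) ** 2 + (Y.expand(N, N) - Y.expand(N, N).t()) ** 2
d2[d2 == 0] = 1                      # diagonal: r^2 log r^2 -> 0
K = d2 * torch.log(d2)
P = torch.cat((torch.ones(N, 1, dtype=torch.float64), X, Y), 1)
L = torch.cat((torch.cat((K, P), 1), torch.cat((P.t(), torch.zeros(3, 3, dtype=torch.float64)), 1)), 0)
print(torch.allclose(torch.inverse(L) @ L, torch.eye(N + 3, dtype=torch.float64), atol=1e-6))   # True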
Example 8: get_barycentric_coords
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def get_barycentric_coords(point, verts):
    if len(verts) == 2:
        diff = verts[1] - verts[0]
        diff_norm = torch.norm(diff)
        normalized_diff = diff / diff_norm
        u = torch.dot(verts[1] - point, normalized_diff) / diff_norm
        v = torch.dot(point - verts[0], normalized_diff) / diff_norm
        return u, v
    elif len(verts) == 3:
        # TODO: Area method instead of LinAlg.
        M = torch.cat([
            torch.cat([verts[0], verts[0].new_ones(1)]).unsqueeze(1),
            torch.cat([verts[1], verts[1].new_ones(1)]).unsqueeze(1),
            torch.cat([verts[2], verts[2].new_ones(1)]).unsqueeze(1),
        ], dim=1)
        invM = torch.inverse(M)
        uvw = torch.matmul(invM, torch.cat([point, point.new_ones(1)]).unsqueeze(1))
        return uvw
    else:
        raise ValueError('Barycentric coords only works for 2 or 3 points')
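The three-vertex branch of example 8 solves M @ uvw = [point; 1], so the recovered barycentric coordinates sum to one and reconstruct the point. A quick check with made-up vertices (my own data):

import torch

# Quick check (my own data): barycentric coordinates sum to 1 and
# reconstruct the query point.
verts = [torch.tensor([0.0, 0.0]), torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])]
point = torch.tensor([0.25, 0.25])
M = torch.stack([torch.cat([v, torch.ones(1)]) for v in verts], dim=1)   # columns are [x, y, 1]
uvw = torch.inverse(M) @ torch.cat([point, torch.ones(1)])
print(uvw.sum().item())                                         # 1.0
print(torch.allclose(torch.stack(verts, dim=1) @ uvw, point))   # True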
Example 9: affineAug
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def affineAug(img, max_add=0.5):
    img_s = img.squeeze()
    h, w = img_s.size()
    # Generate A.
    A = torch.eye(3)
    rand_add = max_add * (torch.rand(3, 3) - 0.5) * 2.0
    # No perspective change.
    rand_add[2, 0:2] = 0
    rand_add[2, 2] = 0
    A = A + rand_add
    denormA = Grid2PxA(w, h)
    normA = Px2GridA(w, h)
    if img.is_cuda:
        A = A.cuda()
        denormA = denormA.cuda()
        normA = normA.cuda()
    grid = torch.nn.functional.affine_grid(A[0:2, :].unsqueeze(0), torch.Size((1, 1, h, w)))
    H_Orig2New = torch.mm(torch.mm(denormA, torch.inverse(A)), normA)
    new_img = torch.nn.functional.grid_sample(img_s.float().unsqueeze(0).unsqueeze(0), grid)
    return new_img, H_Orig2New
Example 10: LAFMagicFro
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def LAFMagicFro(LAFs1, LAFs2, H1to2, xy_th=5.0, scale_log=0.4):
    LHF2_in_1 = reprojectLAFs(LAFs2, torch.inverse(H1to2), True)
    LHF1 = LAFs_to_H_frames(LAFs1)
    idxs_in1, idxs_in_2 = get_closest_correspondences_idxs(LHF1, LHF2_in_1, xy_th, scale_log)
    if len(idxs_in1) == 0:
        print('Warning: no correspondences found')
        return None
    LHF1_good = LHF1[idxs_in1, :, :]
    LHF2_good = LHF2_in_1[idxs_in_2, :, :]
    scales1 = get_LHFScale(LHF1_good)
    scales2 = get_LHFScale(LHF2_good)
    max_scale = torch.max(scales1, scales2)
    min_scale = torch.min(scales1, scales2)
    mean_scale = 0.5 * (max_scale + min_scale)
    eps = 1e-12
    # Smoothed elementwise |difference| of the frames, normalized by mean scale.
    dist_loss = (torch.sqrt((LHF1_good.view(-1, 9) - LHF2_good.view(-1, 9)) ** 2 + eps)
                 / V(mean_scale.data).view(-1, 1).expand(LHF1_good.size(0), 9)).mean(dim=1)
    loss = dist_loss
    return loss, idxs_in1, idxs_in_2, LHF2_in_1[:, 0:2, :]
Example 11: _compute_w
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def _compute_w(self, XS, YS_inner):
    '''
    Use Newton's method (IRLS) to obtain w from the support set XS, YS_inner.
    https://github.com/bertinetto/r2d2/blob/master/fewshots/models/lrd2.py
    '''
    for i in range(self.iters):
        # eta holds w_{i-1}^T x for every support example.
        if i == 0:
            eta = torch.zeros_like(XS[:, 0])  # shape: (support_size,)
        else:
            eta = (XS @ w).squeeze(1)
        mu = torch.sigmoid(eta)
        s = mu * (1 - mu)
        z = eta + (YS_inner - mu) / s
        Sinv = torch.diag(1.0 / s)
        # Woodbury-style dual solve with regularization.
        w = XS.t() @ torch.inverse(XS @ XS.t() + (10. ** self.lam) * Sinv) @ z.unsqueeze(1)
    return w
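The last line of example 11 inverts the support_size x support_size matrix XS @ XS.t() instead of the feature_dim x feature_dim matrix XS.t() @ XS; by the push-through (Woodbury-style) identity both routes give the same w, which is cheaper when the support set is small. A sketch of that identity with a plain lam * I in place of the Sinv weighting (my own simplification):

import torch

# Sketch (my own, with lam * I standing in for Sinv): the primal and dual
# ridge solutions coincide, which is the identity _compute_w relies on.
n, d, lam = 5, 20, 0.1
X = torch.randn(n, d)
y = torch.randn(n, 1)
w_primal = torch.inverse(X.t() @ X + lam * torch.eye(d)) @ X.t() @ y   # invert d x d
w_dual = X.t() @ torch.inverse(X @ X.t() + lam * torch.eye(n)) @ y     # invert n x n
print(torch.allclose(w_primal, w_dual, atol=1e-4))   # True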
Example 12: setUp
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def setUp(self):
    self.features = 3
    self.transform = svd.SVDLinear(features=self.features, num_householder=4)
    self.transform.bias.data = torch.randn(self.features)  # Just so bias isn't zero.
    diagonal = torch.diag(torch.exp(self.transform.log_diagonal))
    orthogonal_1 = self.transform.orthogonal_1.matrix()
    orthogonal_2 = self.transform.orthogonal_2.matrix()
    self.weight = orthogonal_1 @ diagonal @ orthogonal_2
    self.weight_inverse = torch.inverse(self.weight)
    self.logabsdet = utils.logabsdet(self.weight)
    self.eps = 1e-5
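For the SVD parameterization in example 12, torch.inverse could in principle be avoided: with orthogonal factors and a positive diagonal, (Q1 @ D @ Q2)^{-1} = Q2.t() @ D^{-1} @ Q1.t(). A numerical check with stand-in orthogonal matrices built via QR (my own, not the SVDLinear internals):

import torch

# Check (my own stand-ins): an SVD-parameterized weight inverts in closed
# form, because orthogonal factors invert by transposition.
d = 4
q1, _ = torch.linalg.qr(torch.randn(d, d))
q2, _ = torch.linalg.qr(torch.randn(d, d))
diag = torch.exp(torch.randn(d))             # strictly positive diagonal
weight = q1 @ torch.diag(diag) @ q2
closed_form = q2.t() @ torch.diag(1.0 / diag) @ q1.t()
print(torch.allclose(closed_form, torch.inverse(weight), atol=1e-4))   # True
# log|det weight| is simply torch.log(diag).sum(), matching utils.logabsdet.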
Example 13: __init__
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def __init__(self):
    self.weight = None
    self.inverse = None
    self.logabsdet = None
Example 14: invalidate
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def invalidate(self):
    self.weight = None
    self.inverse = None
    self.logabsdet = None
Example 15: inverse
# Required import: import torch [as alias]
# Or: from torch import inverse [as alias]
def inverse(self, inputs, context=None):
    if not self.training and self.using_cache:
        self._check_inverse_cache()
        outputs = F.linear(inputs - self.bias, self.cache.inverse)
        logabsdet = (-self.cache.logabsdet) * torch.ones(outputs.shape[0])
        return outputs, logabsdet
    else:
        return self.inverse_no_cache(inputs)
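Note that F.linear(x, W) computes x @ W.t(), so the cached branch above reproduces the (inputs - bias) @ weight_inverse.t() construction from example 2. A one-line equivalence check (my own tensors):

import torch
import torch.nn.functional as F

# Equivalence check (my own tensors): F.linear(x, W) is x @ W.t().
x = torch.randn(10, 5)
W = torch.randn(5, 5)
b = torch.randn(5)
print(torch.allclose(F.linear(x - b, W), (x - b) @ W.t()))   # True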