This article collects typical usage examples of the torch.det method in Python. If you are wondering how torch.det works, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torch module it belongs to.
The following shows 14 code examples of the torch.det method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
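Before the repository examples, here is a minimal standalone sketch of what torch.det computes (the tensor values below are illustrative only):

import torch

# Determinant of a single square matrix.
A = torch.tensor([[2.0, 0.0],
                  [0.0, 3.0]])
print(torch.det(A))            # tensor(6.)

# torch.det also accepts a batch of square matrices and
# returns one determinant per matrix in the batch.
B = torch.randn(4, 3, 3)
print(torch.det(B).shape)      # torch.Size([4])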
Example 1: estimate_pose
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def estimate_pose(self, pt0, pt1):
    # Weighted centroids of the two keypoint sets (pconf holds per-keypoint confidences).
    pconf2 = self.pconf.view(1, self.num_key, 1)
    cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    diag_mat = torch.diag(self.pconf).unsqueeze(0)
    # Center both point sets and form the weighted cross-covariance matrix.
    x = (pt0 - cent0).transpose(2, 1).contiguous()
    y = pt1 - cent1
    pred_t = cent1 - cent0
    cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)
    u, _, v = torch.svd(cov)
    u = u.transpose(1, 0).contiguous()
    # det(V U^T) is -1 when the optimal orthogonal transform is a reflection.
    d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
    u = u.transpose(1, 0).contiguous().unsqueeze(0)
    # Flip the last column of U by the sign of d to force a proper rotation.
    ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
    v = v.transpose(1, 0).contiguous().unsqueeze(0)
    pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
    return pred_r, pred_t[:, 0, :].view(1, 3)
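The det call above is the reflection fix in the Kabsch/Umeyama point-set alignment: the SVD alone yields the best orthogonal map, which may be a reflection (determinant -1); scaling the last column of U by det(V U^T) forces a proper rotation. A minimal standalone sketch of the same pattern (the names src and dst are illustrative, not from the source):

import torch

src = torch.randn(10, 3)                # source points
dst = torch.randn(10, 3)                # target points
H = (src - src.mean(0)).t().mm(dst - dst.mean(0))
U, S, V = torch.svd(H)
D = torch.eye(3)
D[2, 2] = torch.det(V.mm(U.t()))        # -1 if the best orthogonal map is a reflection
R = V.mm(D).mm(U.t())                   # proper rotation, det(R) == +1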
Example 2: dpp_style
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def dpp_style(self, submethod):
    """Computes a determinantal point process (DPP) diversity term over the counterfactuals."""
    det_entries = torch.ones((self.total_CFs, self.total_CFs))
    if submethod == "inverse_dist":
        for i in range(self.total_CFs):
            for j in range(self.total_CFs):
                det_entries[i, j] = 1.0 / (1.0 + self.compute_dist(self.cfs[i], self.cfs[j]))
                if i == j:
                    # Small diagonal jitter keeps the kernel matrix well-conditioned.
                    det_entries[i, j] += 0.0001
    elif submethod == "exponential_dist":
        for i in range(self.total_CFs):
            for j in range(self.total_CFs):
                det_entries[i, j] = 1.0 / torch.exp(self.compute_dist(self.cfs[i], self.cfs[j]))
                if i == j:
                    det_entries[i, j] += 0.0001
    diversity_loss = torch.det(det_entries)
    return diversity_loss
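Here the determinant acts as a diversity score: det_entries is a pairwise similarity kernel over the counterfactuals, and the determinant of such a kernel (the likelihood of a determinantal point process) grows as the points spread apart and shrinks toward zero for near-duplicates. A vectorized sketch of the inverse_dist variant, assuming the counterfactuals are stacked row-wise in one tensor (the name points and the use of torch.cdist are illustrative, not from the source):

import torch

def dpp_diversity(points, eps=1e-4):
    # points: n x d tensor, one counterfactual per row.
    dist = torch.cdist(points, points)            # pairwise Euclidean distances
    K = 1.0 / (1.0 + dist)                        # inverse-distance similarity kernel
    K = K + eps * torch.eye(points.shape[0])      # diagonal jitter for conditioning
    return torch.det(K)                           # larger value == more diverse set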
Example 3: _logdetgrad
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def _logdetgrad(self):
    # log|det W| of the layer's weight matrix.
    return torch.log(torch.abs(torch.det(self.weight)))
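For larger matrices, log(abs(det(W))) can overflow or underflow because the determinant is materialized before the log. torch.slogdet computes the same quantity directly in log space; a sketch of the equivalent, more stable computation (not from the source repository):

import torch

W = torch.randn(64, 64)
sign, logabsdet = torch.slogdet(W)
# logabsdet equals torch.log(torch.abs(torch.det(W))), but without
# forming the (possibly huge or tiny) determinant itself.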
Example 4: get_weight
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def get_weight(self, input, reverse):
    w_shape = self.w_shape
    # log|det W| per position, scaled by the number of positions in the input.
    dlogdet = torch.log(torch.abs(torch.det(self.weight))) * input.size(-1)
    if not reverse:
        weight = self.weight.view(w_shape[0], w_shape[1], 1)
    else:
        # Inverse weight for the reverse pass of the invertible 1x1 convolution.
        weight = torch.inverse(self.weight).view(w_shape[0], w_shape[1], 1)
    return weight, dlogdet
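The input.size(-1) factor reflects that the same C x C weight acts independently at every position, so the full Jacobian is block-diagonal with one copy of the weight per position. A small numerical check of that identity (the sizes are illustrative):

import torch

C, T = 4, 7
W = torch.randn(C, C)
# Jacobian of applying W at each of T positions: T diagonal blocks of W.
J = torch.block_diag(*([W] * T))
assert torch.allclose(torch.slogdet(J)[1], T * torch.slogdet(W)[1], atol=1e-4)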
Example 5: log_determinant
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def log_determinant(self, x, W):
    h, w = x.shape[2:]
    # Compute the determinant in float64 for precision, then cast back.
    det = torch.det(W.to(torch.float64)).to(torch.float32)
    if det.item() == 0:
        # Avoid log(0) if the determinant underflows to exactly zero.
        det += 1e-6
    return h * w * det.abs().log()
Example 6: forward
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def forward(self, y, x, return_eps=False):
    """ p(y|x) where (x, y) are a pair of input & output
    y --> z, evaluate det(dz/dy) and p(z|x) --> p(y|x)
    Args:
        y (Tensor): output
        x (Tensor): input
    Returns:
        z, logp(y|x), eps_list (None if return_eps is False)
    """
    logdet = 0.
    # list of conditioning features at different scales, and conditional prior
    conditions, cond_prior = self.encoder(x)
    eps_list = []
    for i, module in enumerate(self.flow._modules.values()):
        if i == 0:
            # first revblock, no squeeze and split
            y, dlogdet = module(y, conditions[i])
        elif i == len(self.flow_blocks) - 1:
            # last revblock, top latent
            y, dlogdet, _ = module(y, conditions[i])
            log_prior = cond_prior.log_prob(y)
            if return_eps:
                eps = (y - cond_prior.mean) / cond_prior.log_stddev.exp()
                eps_list.append(eps)
            logdet = logdet + log_prior
        else:
            # middle revblocks, squeeze and split latent
            y, dlogdet, eps = module(y, conditions[i], return_eps=return_eps)
            if return_eps:
                eps_list.append(eps)
        logdet = logdet + dlogdet
    # y is actually z, the latent
    if return_eps:
        return y, logdet, eps_list
    else:
        return y, logdet, None
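The quantity returned as logdet implements the change-of-variables formula for conditional flows: log p(y|x) = log p(z|x) + log|det(dz/dy)|, accumulated block by block, with the conditional prior term log p(z|x) folded in at the last block.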
Example 7: __init__
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def __init__(self, c):
    super(Invertible1x1Conv, self).__init__()
    self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                bias=False)
    # Sample a random orthonormal matrix to initialize weights
    W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
    # Ensure determinant is 1.0 not -1.0
    if torch.det(W) < 0:
        W[:, 0] = -1 * W[:, 0]
    W = W.view(c, c, 1)
    self.conv.weight.data = W
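Because the QR factor of a Gaussian matrix is orthogonal, W starts with det(W) = +1 after the sign flip, so log|det W| = 0 and the invertible 1x1 convolution is exactly volume-preserving at initialization; it only contributes to the log-likelihood once training moves W off the orthogonal manifold.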
Example 8: estimate_rotation
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def estimate_rotation(self, pt0, pt1, sym_or_not):
    # Same weighted Kabsch solve as estimate_pose in Example 1.
    pconf2 = self.pconf.view(1, self.num_key, 1)
    cent0 = torch.sum(pt0 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    cent1 = torch.sum(pt1 * pconf2, dim=1).repeat(1, self.num_key, 1).contiguous()
    diag_mat = torch.diag(self.pconf).unsqueeze(0)
    x = (pt0 - cent0).transpose(2, 1).contiguous()
    y = pt1 - cent1
    pred_t = cent1 - cent0
    cov = torch.bmm(torch.bmm(x, diag_mat), y).contiguous().squeeze(0)
    u, _, v = torch.svd(cov)
    u = u.transpose(1, 0).contiguous()
    d = torch.det(torch.mm(v, u)).contiguous().view(1, 1, 1).contiguous()
    u = u.transpose(1, 0).contiguous().unsqueeze(0)
    ud = torch.cat((u[:, :, :-1], u[:, :, -1:] * d), dim=2)
    v = v.transpose(1, 0).contiguous().unsqueeze(0)
    pred_r = torch.bmm(ud, v).transpose(2, 1).contiguous()
    if sym_or_not:
        # For symmetric objects, return the rotation applied to the symmetry axis.
        pred_r = torch.bmm(pred_r, self.sym_axis).contiguous().view(-1).contiguous()
    return pred_r
Example 9: forward
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def forward(self, x, logdet=None, reverse=False):
    """
    :param x: input
    :type x: torch.Tensor
    :param logdet: accumulated log determinant
    :type logdet: torch.Tensor or None
    :param reverse: whether to apply the inverse transform
    :type reverse: bool
    :return: output and logdet
    :rtype: tuple(torch.Tensor, torch.Tensor)
    """
    logdet_factor = ops.count_pixels(x)  # H * W
    dlogdet = torch.log(torch.abs(torch.det(self.weight))) * logdet_factor
    if not reverse:
        weight = self.weight.view(*self.weight.shape, 1, 1)
        z = F.conv2d(x, weight)
        if logdet is not None:
            logdet = logdet + dlogdet
        return z, logdet
    else:
        weight = self.weight.inverse().view(*self.weight.shape, 1, 1)
        z = F.conv2d(x, weight)
        if logdet is not None:
            logdet = logdet - dlogdet
        return z, logdet
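Note the sign convention: the forward pass adds dlogdet while the reverse pass subtracts the same value, which matches det(W^-1) = 1/det(W), i.e. log|det W^-1| = -log|det W|.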
Example 10: log_jacobian_numerical
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def log_jacobian_numerical(self, x, c=None, rev=False, h=1e-04):
    '''Approximate log Jacobian determinant via finite differences.'''
    if isinstance(x, (list, tuple)):
        batch_size = x[0].shape[0]
        ndim_x_separate = [np.prod(x_i.shape[1:]) for x_i in x]
        ndim_x_total = sum(ndim_x_separate)
        x_flat = torch.cat([x_i.view(batch_size, -1) for x_i in x], dim=1)
    else:
        batch_size = x.shape[0]
        ndim_x_total = np.prod(x.shape[1:])
        x_flat = x.reshape(batch_size, -1)
    # Allocate on the same device/dtype as the input to avoid device mismatches.
    J_num = x_flat.new_zeros(batch_size, ndim_x_total, ndim_x_total)
    for i in range(ndim_x_total):
        # Perturb one input dimension at a time by +/- h.
        offset = x_flat.new_zeros(batch_size, ndim_x_total)
        offset[:, i] = h
        if isinstance(x, (list, tuple)):
            x_upper = torch.split(x_flat + offset, ndim_x_separate, dim=1)
            x_upper = [x_upper[i].view(*x[i].shape) for i in range(len(x))]
            x_lower = torch.split(x_flat - offset, ndim_x_separate, dim=1)
            x_lower = [x_lower[i].view(*x[i].shape) for i in range(len(x))]
        else:
            x_upper = (x_flat + offset).view(*x.shape)
            x_lower = (x_flat - offset).view(*x.shape)
        y_upper = self.forward(x_upper, c=c)
        y_lower = self.forward(x_lower, c=c)
        if isinstance(y_upper, (list, tuple)):
            y_upper = torch.cat([y_i.view(batch_size, -1) for y_i in y_upper], dim=1)
            y_lower = torch.cat([y_i.view(batch_size, -1) for y_i in y_lower], dim=1)
        # Central difference: one column of the Jacobian per perturbed dimension.
        J_num[:, :, i] = (y_upper - y_lower).view(batch_size, -1) / (2 * h)
    logdet_num = x_flat.new_zeros(batch_size)
    for i in range(batch_size):
        logdet_num[i] = torch.det(J_num[i, :, :]).abs().log()
    return logdet_num
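The same check can be done with exact autograd Jacobians instead of finite differences. A sketch using torch.autograd.functional.jacobian (f and x below are placeholders for an invertible module and its input, not names from the source):

import torch

def logdet_autograd(f, x):
    # x: batch_size x ... input tensor; f maps tensors to tensors of equal numel.
    batch_size = x.shape[0]
    out = x.new_zeros(batch_size)
    for i in range(batch_size):
        xi = x[i:i + 1]
        J = torch.autograd.functional.jacobian(f, xi)
        J = J.reshape(xi.numel(), xi.numel())
        # slogdet is numerically safer than det().abs().log() here.
        out[i] = torch.slogdet(J)[1]
    return out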
Example 11: robust_compute_rotation_matrix_from_ortho6d
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def robust_compute_rotation_matrix_from_ortho6d(poses):
    """
    Instead of making the 2nd vector orthogonal to the first,
    create a basis that takes the two predicted
    directions into account equally.
    """
    x_raw = poses[:, 0:3]  # batch*3
    y_raw = poses[:, 3:6]  # batch*3
    x = normalize_vector(x_raw)  # batch*3
    y = normalize_vector(y_raw)  # batch*3
    middle = normalize_vector(x + y)
    orthmid = normalize_vector(x - y)
    x = normalize_vector(middle + orthmid)
    y = normalize_vector(middle - orthmid)
    # Their scalar product should be small!
    # assert torch.einsum("ij,ij->i", [x, y]).abs().max() < 0.00001
    z = normalize_vector(cross_product(x, y))
    x = x.view(-1, 3, 1)
    y = y.view(-1, 3, 1)
    z = z.view(-1, 3, 1)
    matrix = torch.cat((x, y, z), 2)  # batch*3*3
    # Check for reflection in the matrix! If found, flip the last vector (TODO)
    assert (torch.stack([torch.det(mat) for mat in matrix]) < 0).sum() == 0
    return matrix
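torch.det accepts batched input (any shape ending in two equal dimensions), so the reflection check above can also be written without the Python loop as assert (torch.det(matrix) < 0).sum() == 0, computing one determinant per batch element in a single call.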
Example 12: normalize_rot
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def normalize_rot(rot):
    # torch.svd(A) returns U, S, V such that A = U * diag(S) * V^T.
    # Project the (possibly non-orthogonal) matrix onto the nearest rotation:
    # flip the sign of the last singular direction whenever U * V^T would be
    # a reflection, so the result has determinant +1.
    U, _, V = torch.svd(rot)
    S = torch.eye(3).double()
    S[2, 2] = torch.det(U) * torch.det(V)
    return U.mm(S).mm(V.t())
Example 13: best_fit_transform
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
        A: Nxm numpy array of corresponding points, usually points on mdl
        B: Nxm numpy array of corresponding points, usually points on camera axis
    Returns:
        T: 3x4 transformation matrix [R | t] that maps A on to B
    '''
    assert A.shape == B.shape
    # get number of dimensions
    m = A.shape[1]
    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B
    # rotation matrix via SVD of the cross-covariance (Kabsch algorithm)
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    # special reflection case: flip the last row of Vt so det(R) = +1
    if np.linalg.det(R) < 0:
        Vt[m-1, :] *= -1
        R = np.dot(Vt.T, U.T)
    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)
    T = np.zeros((3, 4))
    T[:, :3] = R
    T[:, 3] = t
    return T
Example 14: best_fit_transform_torch
# Required module import: import torch [as alias]
# Or: from torch import det [as alias]
def best_fit_transform_torch(self, A, B):
    '''
    Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
    Input:
        A: Nxm torch tensor of corresponding points, usually points on mdl
        B: Nxm torch tensor of corresponding points, usually points on camera axis
    Returns:
        T: 3x4 transformation matrix [R | t] that maps A on to B
    '''
    assert A.size() == B.size()
    # get number of dimensions
    m = A.size()[1]
    # translate points to their centroids
    centroid_A = torch.mean(A, dim=0)
    centroid_B = torch.mean(B, dim=0)
    AA = A - centroid_A
    BB = B - centroid_B
    # rotation matrix via SVD of the cross-covariance (Kabsch algorithm)
    H = torch.mm(AA.transpose(1, 0), BB)
    U, S, Vt = torch.svd(H)
    R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
    # special reflection case: flip the last row so det(R) = +1
    if torch.det(R) < 0:
        Vt[m-1, :] *= -1
        R = torch.mm(Vt.transpose(1, 0), U.transpose(1, 0))
    # translation
    t = centroid_B - torch.mm(R, centroid_A.view(3, 1))[:, 0]
    T = torch.zeros(3, 4).cuda()  # note: hard-coded CUDA device
    T[:, :3] = R
    T[:, 3] = t
    return T