This article collects typical usage examples of the torch.logdet method in Python. If you have been wondering what torch.logdet does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from the torch module.
The following presents 13 code examples of torch.logdet, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
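Before diving into the examples, a quick orientation: torch.logdet(input) computes the natural logarithm of the determinant of a square matrix (or a batch of square matrices); it returns -inf when the determinant is zero and nan when it is negative. A minimal standalone sketch, not taken from any of the projects below:

import torch

a = torch.randn(3, 3)
spd = a @ a.T + 1e-3 * torch.eye(3)   # symmetric positive-definite, so det > 0

print(torch.logdet(spd))              # log|spd|
print(torch.log(torch.det(spd)))      # same value, but less numerically stable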
Example 1: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()
    W = self.conv.weight.squeeze()
    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
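For context, log_det_W feeds the change-of-variables log-likelihood of the flow; it is scaled by batch_size * n_of_groups because the same 1x1 transform is applied at every batch element and time step. Here is a self-contained functional sketch of the same computation (the shapes and random weight are illustrative, not WaveGlow's actual configuration):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
z = torch.randn(2, 8, 100)                 # (batch_size, group_size, n_of_groups)
W = torch.linalg.qr(torch.randn(8, 8))[0]  # random orthonormal weight, invertible
if torch.det(W) < 0:
    W[:, 0] = -W[:, 0]                     # ensure det > 0, as WaveGlow's init does

z_out = F.conv1d(z, W[..., None])          # 1x1 convolution over the time axis
log_det_W = z.size(0) * z.size(2) * torch.logdet(W)

# negative log-likelihood under a unit Gaussian prior, minus the log-det term
loss = 0.5 * (z_out ** 2).sum() - log_det_W
print(loss)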
Example 2: test_inv_quad_logdet
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def test_inv_quad_logdet(self):
    # Forward
    lazy_tensor = self.create_lazy_tensor(with_solves=True, with_logdet=True)
    evaluated = self.evaluate_lazy_tensor(lazy_tensor)
    flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)
    vecs = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    vecs_copy = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    with gpytorch.settings.num_trace_samples(128), warnings.catch_warnings(record=True) as ws:
        res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(inv_quad_rhs=vecs, logdet=True)
        self.assertFalse(any(issubclass(w.category, ExtraComputationWarning) for w in ws))
    res = res_inv_quad + res_logdet
    actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2).sum(-1)
    actual_logdet = torch.cat(
        [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
    ).view(lazy_tensor.batch_shape)
    actual = actual_inv_quad + actual_logdet
    diff = (res - actual).abs() / actual.abs().clamp(1, math.inf)
    self.assertLess(diff.max().item(), 15e-2)
Example 3: test_inv_quad_logdet_no_reduce
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def test_inv_quad_logdet_no_reduce(self):
    # Forward
    lazy_tensor = self.create_lazy_tensor(with_solves=True, with_logdet=True)
    evaluated = self.evaluate_lazy_tensor(lazy_tensor)
    flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)
    vecs = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    vecs_copy = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    with gpytorch.settings.num_trace_samples(128), warnings.catch_warnings(record=True) as ws:
        res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(
            inv_quad_rhs=vecs, logdet=True, reduce_inv_quad=False
        )
        self.assertFalse(any(issubclass(w.category, ExtraComputationWarning) for w in ws))
    res = res_inv_quad.sum(-1) + res_logdet
    actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2).sum(-1)
    actual_logdet = torch.cat(
        [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
    ).view(lazy_tensor.batch_shape)
    actual = actual_inv_quad + actual_logdet
    diff = (res - actual).abs() / actual.abs().clamp(1, math.inf)
    self.assertLess(diff.max().item(), 15e-2)
Example 4: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()
    W = self.conv.weight.squeeze()
    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example 5: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def forward(self, z, reverse: bool = False):
    # shape
    batch_size, group_size, n_of_groups = z.size()
    W = self.conv.weight.squeeze()
    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.dtype == torch.half:
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W.float())
        z = self.conv(z)
        return z, log_det_W
Example 6: logabsdet
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def logabsdet(x):
    """Returns the log absolute determinant of square matrix x."""
    # Note: torch.logdet() only works for positive determinant.
    _, res = torch.slogdet(x)
    return res
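The docstring's caveat is worth seeing in action. A standalone illustration (not from the project) of why torch.slogdet is used here instead of torch.logdet:

import torch

m = torch.tensor([[0., 1.], [1., 0.]])  # determinant is -1
print(torch.logdet(m))                  # nan: the determinant is negative
sign, logabsdet = torch.slogdet(m)
print(sign, logabsdet)                  # tensor(-1.) tensor(0.) -- log|det| = log 1 = 0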
Example 7: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def forward(self):
    constrainted_matrix = self.select_param()
    matrix_ = torch.squeeze(torch.squeeze(constrainted_matrix, dim=2), dim=2)
    matrix_t = torch.t(matrix_)
    matrixs = torch.mm(matrix_t, matrix_)
    # trace of M @ M^{-1} is the trace of the identity, i.e. the dimension of M
    trace_ = torch.trace(torch.mm(matrixs, torch.inverse(matrixs)))
    log_det = torch.logdet(matrixs)
    maha_loss = trace_ - log_det
    return maha_loss
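Since trace(M @ inverse(M)) equals the dimension of M for any invertible M, this loss effectively reduces to d - log|M|. A quick standalone check, illustrative only and not from the source project:

import torch

M = torch.randn(5, 5)
M = M @ M.T + torch.eye(5)                 # symmetric positive-definite
print(torch.trace(M @ torch.inverse(M)))   # ~5.0, the dimension of M
print(5.0 - torch.logdet(M))               # what trace_ - log_det evaluates to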
Example 8: __init__
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def __init__(self, nu, K, validate_args=False):
    TModule.__init__(self)
    if K.dim() < 2:
        raise ValueError("K must be at least 2-dimensional")
    n = K.shape[-1]
    if K.shape[-2] != K.shape[-1]:
        raise ValueError("K must be square")
    if isinstance(nu, Number):
        nu = torch.tensor(float(nu))
    # the Wishart density requires nu > n - 1, matching the error message below
    if torch.any(nu <= n - 1):
        raise ValueError("Must have nu > n - 1")
    self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
    batch_shape = nu.shape
    event_shape = torch.Size([n, n])
    # normalization constant: -log(2^{nu*n/2} * |K|^{nu/2} * Gamma_n(nu/2))
    logdetK = torch.logdet(K)
    C = -(nu / 2) * (logdetK + n * math.log(2)) - torch.mvlgamma(nu / 2, n)
    K_inv = torch.inverse(K)
    # need to assign values before registering as buffers to make argument validation work
    self.nu = nu
    self.K_inv = K_inv
    self.C = C
    super(WishartPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
    # now need to delete these attributes to be able to register them as buffers
    del self.nu, self.K_inv, self.C
    self.register_buffer("nu", nu)
    self.register_buffer("K_inv", K_inv)
    self.register_buffer("C", C)
Example 9: log_prob
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def log_prob(self, X):
    # I'm sure this could be done more elegantly
    logdetp = torch.logdet(X)
    Kinvp = torch.matmul(self.K_inv, X)
    trKinvp = torch.diagonal(Kinvp, dim1=-2, dim2=-1).sum(-1)
    # Wishart log density: C + ((nu - n - 1) * log|X| - tr(K^{-1} X)) / 2;
    # note the 1/2 factor must scale the trace term as well
    return self.C + 0.5 * ((self.nu - self.n - 1) * logdetp - trKinvp)
Example 10: _test_inv_quad_logdet
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def _test_inv_quad_logdet(self, reduce_inv_quad=True, cholesky=False):
    if not self.__class__.skip_slq_tests:
        # Forward
        lazy_tensor = self.create_lazy_tensor()
        evaluated = self.evaluate_lazy_tensor(lazy_tensor)
        flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)
        vecs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 3, requires_grad=True)
        vecs_copy = vecs.clone().detach_().requires_grad_(True)
        _wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
        with patch("gpytorch.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
            with gpytorch.settings.num_trace_samples(256), gpytorch.settings.max_cholesky_size(
                math.inf if cholesky else 0
            ), gpytorch.settings.cg_tolerance(1e-5):
                res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(
                    inv_quad_rhs=vecs, logdet=True, reduce_inv_quad=reduce_inv_quad
                )
            actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2)
            if reduce_inv_quad:
                actual_inv_quad = actual_inv_quad.sum(-1)
            actual_logdet = torch.cat(
                [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
            ).view(lazy_tensor.batch_shape)
            self.assertAllClose(res_inv_quad, actual_inv_quad, rtol=0.01, atol=0.01)
            self.assertAllClose(res_logdet, actual_logdet, rtol=0.2, atol=0.03)
            if not cholesky and self.__class__.should_call_cg:
                self.assertTrue(linear_cg_mock.called)
            else:
                self.assertFalse(linear_cg_mock.called)
Example 11: log_det_by_cholesky_test
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def log_det_by_cholesky_test():
    """Test for the function log_det_by_cholesky()."""
    a = torch.randn(1, 4, 4)
    a = torch.matmul(a, a.transpose(2, 1))  # symmetric positive semi-definite
    print(a)
    res_1 = torch.logdet(torch.squeeze(a))
    res_2 = log_det_by_cholesky(a)
    print(res_1, res_2)
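The helper log_det_by_cholesky is defined elsewhere in that project and is not shown in the snippet. As a hedged sketch of what such a helper typically looks like (an assumption, not the project's actual implementation): for a symmetric positive-definite matrix A with Cholesky factor L, log|A| = 2 * sum(log(diag(L))).

import torch

def log_det_by_cholesky(matrix):
    # matrix: (..., n, n), assumed symmetric positive-definite
    chol = torch.linalg.cholesky(matrix)   # torch.cholesky on older PyTorch
    return 2.0 * torch.log(torch.diagonal(chol, dim1=-2, dim2=-1)).sum(-1)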
Example 12: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def forward(self, post: Posterior, comp: Tensor) -> Tensor:
    r"""Calculate the approximated log evidence, i.e., log(P(D|theta)).

    Args:
        post: Training posterior distribution from self.model
        comp: Comparison pairs; see PairwiseGP.__init__ for more details

    Returns:
        The approximated evidence, i.e., the marginal log likelihood
    """
    model = self.model
    if comp is not model.comparisons:
        raise RuntimeError("Must train on training data")
    f_max = post.mean
    log_posterior = model._posterior_f(f_max)
    part1 = -log_posterior
    part2 = model.covar @ model.likelihood_hess
    eye = torch.eye(part2.size(-1)).expand(part2.shape)
    part2 = part2 + eye
    part2 = -0.5 * torch.logdet(part2)
    evidence = part1 + part2
    # Sum up the mll first so that when adding the prior probs it won't
    # propagate and double count
    evidence = evidence.sum()
    # Add log probs of priors on the (functions of) parameters
    for _, prior, closure, _ in self.named_priors():
        evidence = evidence.add(prior.log_prob(closure()).sum())
    return evidence
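The part2 line above is the curvature term of the Laplace approximation to the evidence, -0.5 * log|I + K H| with K the covariance and H the likelihood Hessian. A standalone numerical sketch of just that term, using stand-in SPD matrices rather than the model's actual quantities:

import torch

torch.manual_seed(0)
A = torch.randn(4, 4)
K = A @ A.T + torch.eye(4)   # stand-in for model.covar
H = 0.5 * torch.eye(4)       # stand-in for model.likelihood_hess

# I + K H has all eigenvalues > 1 when K and H are SPD, so logdet is safe here
part2 = -0.5 * torch.logdet(torch.eye(4) + K @ H)
print(part2)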
Example 13: log_det_other
# Required import: import torch [as alias]
# Or alternatively: from torch import logdet [as alias]
def log_det_other(x):
    return torch.logdet(x)