This article collects typical usage examples of the Python method torch.isinf. If you have been wondering what torch.isinf does, how to call it, or what real uses look like, the curated code examples below may help. You can also browse further usage examples from the torch module, to which this method belongs.
Below, 15 code examples of torch.isinf are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
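Before the examples, here is a minimal standalone sketch (not taken from any of the repositories below) of what torch.isinf itself returns: an elementwise boolean mask that is True for positive or negative infinity and, unlike torch.isnan, False for NaN.

import torch

x = torch.tensor([1.0, float("inf"), float("-inf"), float("nan")])
print(torch.isinf(x))        # tensor([False,  True,  True, False])
print(torch.isinf(x).any())  # tensor(True) -- the guard used throughout the examples below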
Example 1: test_forward
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_forward(self):
    batch_size = 10
    in_shape = [2, 3, 4]
    out_shape = [5, 6]
    inputs = torch.randn(batch_size, *in_shape)
    for hidden_sizes in [[20], [20, 30], [20, 30, 40]]:
        with self.subTest(hidden_sizes=hidden_sizes):
            model = mlp.MLP(
                in_shape=in_shape,
                out_shape=out_shape,
                hidden_sizes=hidden_sizes,
            )
            outputs = model(inputs)
            self.assertIsInstance(outputs, torch.Tensor)
            self.assertEqual(outputs.shape, torch.Size([batch_size] + out_shape))
            self.assertFalse(torch.isnan(outputs).any())
            self.assertFalse(torch.isinf(outputs).any())
    # An empty list of hidden sizes is invalid and should raise.
    with self.assertRaises(Exception):
        mlp.MLP(
            in_shape=in_shape,
            out_shape=out_shape,
            hidden_sizes=[],
        )
Example 2: test_sample
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_sample(self):
    num_samples = 10
    context_size = 20
    input_shape = [2, 3, 4]
    context_shape = [5, 6]
    dist = normal.StandardNormal(input_shape)
    maybe_context = torch.randn(context_size, *context_shape)
    for context in [None, maybe_context]:
        with self.subTest(context=context):
            samples = dist.sample(num_samples, context=context)
            self.assertIsInstance(samples, torch.Tensor)
            self.assertFalse(torch.isnan(samples).any())
            self.assertFalse(torch.isinf(samples).any())
            if context is None:
                self.assertEqual(samples.shape, torch.Size([num_samples] + input_shape))
            else:
                self.assertEqual(
                    samples.shape, torch.Size([context_size, num_samples] + input_shape))
Example 3: test_mean
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_mean(self):
    context_size = 20
    input_shape = [2, 3, 4]
    context_shape = [5, 6]
    dist = normal.StandardNormal(input_shape)
    maybe_context = torch.randn(context_size, *context_shape)
    for context in [None, maybe_context]:
        with self.subTest(context=context):
            means = dist.mean(context=context)
            self.assertIsInstance(means, torch.Tensor)
            self.assertFalse(torch.isnan(means).any())
            self.assertFalse(torch.isinf(means).any())
            # Relies on a tensor-aware assertEqual (e.g. a TorchTestCase base class).
            self.assertEqual(means, torch.zeros_like(means))
            if context is None:
                self.assertEqual(means.shape, torch.Size(input_shape))
            else:
                self.assertEqual(means.shape, torch.Size([context_size] + input_shape))
Example 4: test_sample_and_log_prob_with_context
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_sample_and_log_prob_with_context(self):
    num_samples = 10
    context_size = 20
    input_shape = [2, 3, 4]
    context_shape = [2, 3, 4]
    dist = discrete.ConditionalIndependentBernoulli(input_shape)
    context = torch.randn(context_size, *context_shape)
    samples, log_prob = dist.sample_and_log_prob(num_samples, context=context)
    self.assertIsInstance(samples, torch.Tensor)
    self.assertIsInstance(log_prob, torch.Tensor)
    self.assertEqual(samples.shape, torch.Size([context_size, num_samples] + input_shape))
    self.assertEqual(log_prob.shape, torch.Size([context_size, num_samples]))
    self.assertFalse(torch.isnan(log_prob).any())
    self.assertFalse(torch.isinf(log_prob).any())
    self.assert_tensor_less_equal(log_prob, 0.0)
    self.assertFalse(torch.isnan(samples).any())
    self.assertFalse(torch.isinf(samples).any())
    # Every Bernoulli sample must be exactly 0 or 1.
    binary = (samples == 1.0) | (samples == 0.0)
    self.assertEqual(binary, torch.ones_like(binary))
Example 5: test_stochastic_elbo
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_stochastic_elbo(self):
    batch_size = 10
    input_shape = [2, 3, 4]
    latent_shape = [5, 6]
    prior = distributions.StandardNormal(latent_shape)
    approximate_posterior = distributions.StandardNormal(latent_shape)
    likelihood = distributions.StandardNormal(input_shape)
    vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)
    inputs = torch.randn(batch_size, *input_shape)
    for num_samples in [1, 10, 100]:
        with self.subTest(num_samples=num_samples):
            elbo = vae.stochastic_elbo(inputs, num_samples)
            self.assertIsInstance(elbo, torch.Tensor)
            self.assertFalse(torch.isnan(elbo).any())
            self.assertFalse(torch.isinf(elbo).any())
            self.assertEqual(elbo.shape, torch.Size([batch_size]))
Example 6: test_sample
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_sample(self):
    num_samples = 10
    input_shape = [2, 3, 4]
    latent_shape = [5, 6]
    prior = distributions.StandardNormal(latent_shape)
    approximate_posterior = distributions.StandardNormal(latent_shape)
    likelihood = distributions.StandardNormal(input_shape)
    vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)
    for mean in [True, False]:
        with self.subTest(mean=mean):
            samples = vae.sample(num_samples, mean=mean)
            self.assertIsInstance(samples, torch.Tensor)
            self.assertFalse(torch.isnan(samples).any())
            self.assertFalse(torch.isinf(samples).any())
            self.assertEqual(samples.shape, torch.Size([num_samples] + input_shape))
Example 7: test_encode
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_encode(self):
    batch_size = 20
    input_shape = [2, 3, 4]
    latent_shape = [5, 6]
    inputs = torch.randn(batch_size, *input_shape)
    prior = distributions.StandardNormal(latent_shape)
    approximate_posterior = distributions.StandardNormal(latent_shape)
    likelihood = distributions.StandardNormal(input_shape)
    vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)
    for num_samples in [None, 1, 10]:
        with self.subTest(num_samples=num_samples):
            encodings = vae.encode(inputs, num_samples)
            self.assertIsInstance(encodings, torch.Tensor)
            self.assertFalse(torch.isnan(encodings).any())
            self.assertFalse(torch.isinf(encodings).any())
            if num_samples is None:
                self.assertEqual(encodings.shape, torch.Size([batch_size] + latent_shape))
            else:
                self.assertEqual(
                    encodings.shape, torch.Size([batch_size, num_samples] + latent_shape))
Example 8: test_reconstruct
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def test_reconstruct(self):
    batch_size = 20
    input_shape = [2, 3, 4]
    latent_shape = [5, 6]
    inputs = torch.randn(batch_size, *input_shape)
    prior = distributions.StandardNormal(latent_shape)
    approximate_posterior = distributions.StandardNormal(latent_shape)
    likelihood = distributions.StandardNormal(input_shape)
    vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)
    for mean in [True, False]:
        for num_samples in [None, 1, 10]:
            with self.subTest(mean=mean, num_samples=num_samples):
                recons = vae.reconstruct(inputs, num_samples=num_samples, mean=mean)
                self.assertIsInstance(recons, torch.Tensor)
                self.assertFalse(torch.isnan(recons).any())
                self.assertFalse(torch.isinf(recons).any())
                if num_samples is None:
                    self.assertEqual(recons.shape, torch.Size([batch_size] + input_shape))
                else:
                    self.assertEqual(
                        recons.shape, torch.Size([batch_size, num_samples] + input_shape))
Example 9: normalize_feature
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
# This snippet additionally assumes: import numpy as np; import scipy.sparse as sp
def normalize_feature(mx):
    """Row-normalize a sparse matrix.

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """
    if type(mx) is not sp.lil_matrix:
        mx = mx.tolil()
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    # All-zero rows yield inf under 1/rowsum; clamp them to 0
    # (np.isinf is the NumPy counterpart of torch.isinf).
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
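The isinf guard matters because any all-zero row makes 1/rowsum infinite. A quick hedged check (hypothetical input, assuming the imports above and the function just defined):

import numpy as np
import scipy.sparse as sp

mx = sp.csr_matrix(np.array([[1., 3.], [0., 0.]]))
print(normalize_feature(mx).toarray())
# [[0.25 0.75]
#  [0.   0.  ]]  -- the zero row stays zero instead of becoming inf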
Example 10: normalize_adj_tensor
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def normalize_adj_tensor(adj, sparse=False):
    """Normalize an adjacency tensor (symmetric GCN normalization).
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # TODO if this is too slow, uncomment the following code,
        # but you need to install torch_scatter
        # return normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        # Dense path: D^{-1/2} (A + I) D^{-1/2}.
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1/2).flatten()
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
        mx = mx @ r_mat_inv
        return mx
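A hedged usage sketch of the dense path (the 3-node path graph is hypothetical; to_scipy and the other sparse helpers are not needed here):

import torch

adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
norm = normalize_adj_tensor(adj, sparse=False)
# Each entry of norm is (A + I)_ij / sqrt(d_i * d_j), where d is the
# degree after adding self-loops, so the result stays symmetric.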
Example 11: degree_normalize_adj_tensor
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def degree_normalize_adj_tensor(adj, sparse=True):
    """Degree-normalize an adjacency tensor: D^{-1} (A + I).
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # return degree_normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = degree_normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        # Dense path: same as normalize_adj_tensor above, but with a plain
        # row normalization (D^{-1}) instead of the symmetric D^{-1/2}.
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1).flatten()
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
        return mx
Example 12: feature_smoothing
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def feature_smoothing(self, adj, X):
    # Symmetrize the adjacency, then build the graph Laplacian L = D - A.
    adj = (adj.t() + adj) / 2
    rowsum = adj.sum(1)
    r_inv = rowsum.flatten()
    D = torch.diag(r_inv)
    L = D - adj
    # Normalize: L <- D^{-1/2} L D^{-1/2}; the 1e-3 offset keeps pow(-1/2)
    # finite for isolated (zero-degree) nodes.
    r_inv = r_inv + 1e-3
    r_inv = r_inv.pow(-1/2).flatten()
    r_inv[torch.isinf(r_inv)] = 0.
    r_mat_inv = torch.diag(r_inv)
    # L = r_mat_inv @ L
    L = r_mat_inv @ L @ r_mat_inv
    # Smoothness penalty: tr(X^T L X).
    XLXT = torch.matmul(torch.matmul(X.t(), L), X)
    loss_smooth_feat = torch.trace(XLXT)
    return loss_smooth_feat
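For context (a standard Laplacian identity, not from the repository): with the unnormalized Laplacian, tr(X^T L X) = 1/2 * sum_ij a_ij * ||x_i - x_j||^2, so minimizing this trace pulls the features of connected nodes together. A tiny check of the identity:

import torch

A = torch.tensor([[0., 1.], [1., 0.]])
L = torch.diag(A.sum(1)) - A
X = torch.randn(2, 3)
lhs = torch.trace(X.t() @ L @ X)
rhs = 0.5 * sum(A[i, j] * ((X[i] - X[j]) ** 2).sum()
                for i in range(2) for j in range(2))
assert torch.allclose(lhs, rhs)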
Example 13: _validate
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def _validate(self):
    if self.means.dim() != 2:
        raise ValueError("means should be 2D (first dimension batch-size)")
    if self.covs.dim() != 3:
        raise ValueError("covs should be 3D (first dimension batch-size)")
    if torch.isinf(self.means).any():
        raise ValueError("Infs in `means`.")
    if torch.isinf(self.covs).any():
        raise ValueError("Infs in `covs`.")
    if torch.isnan(self.means).any():
        raise ValueError("nans in `means`.")
    if torch.isnan(self.covs).any():
        raise ValueError("nans in `covs`.")
    if self.covs.shape[0] != self.means.shape[0]:
        raise ValueError("The batch-size (1st dimension) of cov doesn't match that of mean.")
    if self.covs.shape[1] != self.covs.shape[2]:
        raise ValueError("The cov should be symmetric in the last two dimensions.")
    if self.covs.shape[1] != self.means.shape[1]:
        raise ValueError("The state-size (2nd/3rd dimension) of cov doesn't match that of mean.")
    if self.last_measured.shape[0] != self.num_groups or self.last_measured.dim() != 1:
        raise ValueError(f"`last_measured` should be 1D tensor w/length of {self.num_groups:,}.")
Example 14: forward
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def forward(self, inputs):
    # Resample the Gumbel noise until neither the noise nor the softmax
    # overflows into inf/nan (rare, but possible with small tau).
    while True:
        gumbels = -torch.empty_like(self.arch_parameters).exponential_().log()
        logits = (self.arch_parameters.log_softmax(dim=1) + gumbels) / self.tau
        probs = nn.functional.softmax(logits, dim=1)
        index = probs.max(-1, keepdim=True)[1]
        one_h = torch.zeros_like(logits).scatter_(-1, index, 1.0)
        # Straight-through estimator: one-hot forward, soft gradients.
        hardwts = one_h - probs.detach() + probs
        if torch.isinf(gumbels).any() or torch.isinf(probs).any() or torch.isnan(probs).any():
            continue
        else:
            break
    feature = self.stem(inputs)
    for i, cell in enumerate(self.cells):
        if isinstance(cell, SearchCell):
            feature = cell.forward_gdas(feature, hardwts, index)
        else:
            feature = cell(feature)
    out = self.lastact(feature)
    out = self.global_pooling(out)
    out = out.view(out.size(0), -1)
    logits = self.classifier(out)
    return out, logits
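The line hardwts = one_h - probs.detach() + probs is the straight-through trick: the forward value equals the one-hot one_h, while gradients flow through the soft probs. A minimal standalone illustration (hypothetical values, not repository code):

import torch

probs = torch.tensor([0.2, 0.7, 0.1], requires_grad=True)
one_h = torch.tensor([0., 1., 0.])
hardwts = one_h - probs.detach() + probs
print(hardwts)         # tensor([0., 1., 0.], grad_fn=<AddBackward0>) -- one-hot forward
hardwts.sum().backward()
print(probs.grad)      # tensor([1., 1., 1.]) -- gradient of the soft path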
Example 15: select2withP
# Required import: import torch [as alias]
# Or alternatively: from torch import isinf [as alias]
def select2withP(logits, tau, just_prob=False, num=2, eps=1e-7):
    if tau <= 0:
        new_logits = logits
        probs = nn.functional.softmax(new_logits, dim=1)
    else:
        while True:  # a trick to avoid the Gumbel-sampling overflow bug
            gumbels = -torch.empty_like(logits).exponential_().log()
            new_logits = (logits.log_softmax(dim=1) + gumbels) / tau
            probs = nn.functional.softmax(new_logits, dim=1)
            if (not torch.isinf(gumbels).any()) and (not torch.isinf(probs).any()) and (not torch.isnan(probs).any()):
                break
    if just_prob:
        return probs
    with torch.no_grad():  # add eps to guard against an unexpected torch.multinomial error
        probs = probs.cpu()
        selected_index = torch.multinomial(probs + eps, num, False).to(logits.device)
    selected_logit = torch.gather(new_logits, 1, selected_index)
    selected_probs = nn.functional.softmax(selected_logit, dim=1)
    return selected_index, selected_probs
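A hedged usage sketch (hypothetical shapes; assumes select2withP and its torch/nn imports are in scope): sample two of five candidate operations per row at temperature 1.0.

import torch
import torch.nn as nn

logits = torch.randn(4, 5, requires_grad=True)
index, probs = select2withP(logits, tau=1.0)
print(index.shape, probs.shape)  # torch.Size([4, 2]) torch.Size([4, 2])
# probs is the renormalized softmax over just the two selected logits,
# so each row of probs sums to 1.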