This article collects typical usage examples of the torch.isfinite method in Python. If you are wondering what torch.isfinite does, how to call it, or where it is useful, the curated code examples below should help. You can also explore further usage examples from the torch module, where this method lives.
The following 15 code examples of torch.isfinite are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
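Before the examples, a quick orientation: torch.isfinite(input) returns a boolean tensor of the same shape as input, True where an element is finite and False for inf, -inf, and NaN. A minimal demonstration:

import torch

x = torch.tensor([1.0, float("inf"), float("-inf"), float("nan")])
print(torch.isfinite(x))        # tensor([ True, False, False, False])
print(torch.isfinite(x).all())  # tensor(False): any non-finite element fails the check

Most of the examples below build on exactly this pattern: isfinite(...).all() to assert health, or ~isfinite(...) to detect divergence.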
Example 1: _update
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def _update(self, y):
    # ===== Save data ===== #
    self._y.append(y)

    # ===== Perform a filtering move ===== #
    _, ll = self.filter.filter(y)
    self._w_rec += ll

    # ===== Calculate effective number of samples ===== #
    ess = get_ess(self._w_rec)
    self._logged_ess.append(ess)

    # ===== Rejuvenate if there are too few samples ===== #
    if ess < self._threshold or (~isfinite(self._w_rec)).any():
        self.rejuvenate()
        self._w_rec[:] = 0.

    return self
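Example 1 relies on a get_ess helper that is not shown. A common way to compute the effective sample size from unnormalized log-weights is sketched below; the name and signature are assumptions made to keep the example self-contained, not the library's actual code:

import torch

def get_ess(log_weights):
    # Hypothetical helper. ESS = (sum w_i)^2 / sum w_i^2, evaluated
    # stably in log space: log ESS = 2*logsumexp(lw) - logsumexp(2*lw)
    return torch.exp(
        2.0 * torch.logsumexp(log_weights, dim=-1)
        - torch.logsumexp(2.0 * log_weights, dim=-1)
    )

The (~isfinite(self._w_rec)).any() check guards against NaN or infinite log-weights produced by the filtering step; either condition triggers the same rejuvenation path as a low ESS.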
Example 2: test_n_additions_via_scalar_multiplication
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_n_additions_via_scalar_multiplication(n, a, dtype, negative, manifold, strict):
    n = torch.as_tensor(n, dtype=a.dtype).requires_grad_()
    y = torch.zeros_like(a)
    for _ in range(int(n.item())):
        y = manifold.mobius_add(a, y)
    ny = manifold.mobius_scalar_mul(n, a)
    if negative:
        tolerance = {
            torch.float32: dict(atol=4e-5, rtol=1e-3),
            torch.float64: dict(atol=1e-5, rtol=1e-3),
        }
    else:
        tolerance = {
            torch.float32: dict(atol=2e-6, rtol=1e-3),
            torch.float64: dict(atol=1e-5, rtol=1e-3),
        }
    tolerant_allclose_check(y, ny, strict=strict, **tolerance[dtype])
    ny.sum().backward()
    assert torch.isfinite(n.grad).all()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(manifold.k.grad).all()
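The property exercised here is that, on a gyrovector manifold, scalar multiplication by an integer agrees with repeated Möbius addition: n ⊗ a = a ⊕ a ⊕ … ⊕ a (n summands). The final isfinite assertions then confirm that backpropagating through both routes yields finite gradients for n, a, and the curvature parameter manifold.k.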
Example 3: test_scalar_multiplication_distributive
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_scalar_multiplication_distributive(a, r1, r2, manifold, dtype):
    res = manifold.mobius_scalar_mul(r1 + r2, a)
    res1 = manifold.mobius_add(
        manifold.mobius_scalar_mul(r1, a), manifold.mobius_scalar_mul(r2, a),
    )
    # same sum with the operand order swapped (Möbius addition
    # is not commutative in general, so both orders are checked)
    res2 = manifold.mobius_add(
        manifold.mobius_scalar_mul(r2, a), manifold.mobius_scalar_mul(r1, a),
    )
    tolerance = {
        torch.float32: dict(atol=5e-6, rtol=1e-4),
        torch.float64: dict(atol=1e-7, rtol=1e-4),
    }
    np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])
    np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])
    res.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(r1.grad).all()
    assert torch.isfinite(r2.grad).all()
    assert torch.isfinite(manifold.k.grad).all()
Example 4: test_geodesic_segment_length_property
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_geodesic_segment_length_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:-1]
    gamma_ab_t1 = gamma_ab_t[1:]
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    speed = manifold.dist(a, b, keepdim=True).unsqueeze(0).expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(rtol=1e-5, atol=5e-3),
        torch.float64: dict(rtol=1e-5, atol=5e-3),
    }
    length = speed / segments
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), length.detach(), **tolerance[dtype]
    )
    (length + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all()
Example 5: test_geodesic_segement_unit_property
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_geodesic_segement_unit_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic_unit(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:1]
    gamma_ab_t1 = gamma_ab_t
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    true_distance_travelled = t.expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(atol=2e-4, rtol=5e-5),
        torch.float64: dict(atol=1e-10),
    }
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), true_distance_travelled.detach(), **tolerance[dtype]
    )
    (true_distance_travelled + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all()
Example 6: assert_never_inf
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def assert_never_inf(tensor):
    """Make sure there are no Inf values in the given tensor.

    Parameters
    ----------
    tensor : torch.Tensor
        input tensor

    Raises
    ------
    InfTensorException
        If one or more Inf values occur in the given tensor
    """
    try:
        # .all() is required here: .any() would pass as long as a single
        # element were finite, which is not what the docstring promises
        assert torch.isfinite(tensor).all()
    except AssertionError:
        raise InfTensorException("There was an Inf value in tensor")
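The function presumes an InfTensorException defined elsewhere in the same project. A minimal hypothetical stand-in, so the snippet runs in isolation:

class InfTensorException(Exception):
    """Raised when a tensor contains Inf (or otherwise non-finite) values."""

Note also that torch.isfinite is False for NaN as well as Inf, so this check is slightly stricter than its name suggests.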
Example 7: apply_gradients
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def apply_gradients(self, grads_and_vars):
    self._iterations += 1
    grads, var_list = list(zip(*grads_and_vars))
    new_grads = []

    if self._summaries:
        summary.scalar("optimizer/scale", self._scale,
                       utils.get_global_step())

    for grad in grads:
        if grad is None:
            new_grads.append(None)
            continue

        norm = grad.data.norm()

        if not torch.isfinite(norm):
            self._update_if_not_finite_grads()
            return
        else:
            # Rescale gradients
            new_grads.append(grad.data.float().mul_(1.0 / self._scale))

    self._update_if_finite_grads()
    self._optimizer.apply_gradients(zip(new_grads, var_list))
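This apply_gradients is the skip-step half of dynamic loss scaling for mixed-precision training: gradients were computed on a loss multiplied by self._scale, so finite gradients are unscaled by 1/scale before the real optimizer step, while any non-finite gradient norm abandons the step entirely. The two _update_if_* helpers are not shown; a hedged sketch of the usual policy (the constants and the _good_steps attribute are assumptions, not the library's code):

def _update_if_not_finite_grads(self):
    # Overflow detected: skip the step and halve the loss scale.
    self._scale = max(self._scale / 2.0, 1.0)
    self._good_steps = 0

def _update_if_finite_grads(self):
    # After a long run of clean steps, cautiously double the scale.
    self._good_steps += 1
    if self._good_steps >= 1000:
        self._scale = self._scale * 2.0
        self._good_steps = 0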
Example 8: test_positional_embeddings
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_positional_embeddings(positional_encoding: Optional[str]):
    # All sizes are prime, making them easy to find during debugging.
    batch_size = 7
    max_seq_len = 101
    n_head = 5
    dims = 11 * n_head

    transformer = PytorchTransformer(
        dims, 3, positional_encoding=positional_encoding, num_attention_heads=n_head
    )
    transformer.eval()

    with torch.no_grad():
        inputs = torch.randn(batch_size, max_seq_len, dims)
        mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
        for b in range(batch_size):
            mask[b, max_seq_len - b:] = False

        assert not torch.isnan(inputs).any()
        assert torch.isfinite(inputs).all()

        outputs = transformer(inputs, mask)
        assert outputs.size() == inputs.size()
        assert not torch.isnan(outputs).any()
        assert torch.isfinite(outputs).all()
Example 9: __call__
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def __call__(self, engine):
    output = self._output_transform(engine.state.output)

    def raise_error(x):
        if isinstance(x, numbers.Number):
            x = torch.tensor(x)

        if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
            raise RuntimeError("Infinite or NaN tensor found.")

    try:
        apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
    except RuntimeError:
        self._logger.warning("{}: Output '{}' contains NaN or Inf. Stop training"
                             .format(self.__class__.__name__, output))
        engine.terminate()
Example 10: assertAllClose
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def assertAllClose(self, tensor1, tensor2, rtol=1e-4, atol=1e-5, equal_nan=False):
    if not tensor1.shape == tensor2.shape:
        raise ValueError(f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) do not have the same shape.")

    if torch.allclose(tensor1, tensor2, rtol=rtol, atol=atol, equal_nan=equal_nan):
        return True

    if not equal_nan:
        # a tensor containing NaNs is never equal to itself
        if not torch.equal(tensor1, tensor1):
            raise AssertionError(f"tensor1 ({tensor1.shape}) contains NaNs")
        if not torch.equal(tensor2, tensor2):
            raise AssertionError(f"tensor2 ({tensor2.shape}) contains NaNs")

    rtol_diff = (torch.abs(tensor1 - tensor2) / torch.abs(tensor2)).view(-1)
    rtol_diff = rtol_diff[torch.isfinite(rtol_diff)]
    rtol_max = rtol_diff.max().item()

    atol_diff = (torch.abs(tensor1 - tensor2) - torch.abs(tensor2).mul(rtol)).view(-1)
    atol_diff = atol_diff[torch.isfinite(atol_diff)]
    atol_max = atol_diff.max().item()

    raise AssertionError(
        f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) are not close enough. \n"
        f"max rtol: {rtol_max:0.8f}\t\tmax atol: {atol_max:0.8f}"
    )
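A quick standalone illustration of why the isfinite masking matters in the diagnostics above: zeros in tensor2 turn the relative difference into inf (or NaN for 0/0), which would otherwise dominate max():

import torch

t1 = torch.tensor([1.0, 2.0, 0.5])
t2 = torch.tensor([1.0, 0.0, 0.5])  # zero entry -> division yields inf
rtol_diff = (torch.abs(t1 - t2) / torch.abs(t2)).view(-1)
print(rtol_diff)                                   # tensor([0., inf, 0.])
print(rtol_diff[torch.isfinite(rtol_diff)].max())  # tensor(0.)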
Example 11: detect_nan_tensors
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def detect_nan_tensors(self, loss: Tensor) -> None:
    model = self.get_model()

    # check if loss is nan
    if not torch.isfinite(loss).all():
        raise ValueError(
            'The loss returned in `training_step` is nan or inf.'
        )

    # check if a network weight is nan
    for name, param in model.named_parameters():
        if not torch.isfinite(param).all():
            self.print_nan_gradients()
            raise ValueError(
                f'Detected nan and/or inf values in `{name}`.'
                ' Check your forward pass for numerically unstable operations.'
            )
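Example 12 below exercises exactly this check end to end, by deliberately corrupting a parameter and running a trainer configured with terminate_on_nan=True.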
Example 12: test_nan_params_detection
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_nan_params_detection(tmpdir):
    class CurrentModel(EvalModelTemplate):
        test_batch_nan = 8

        def on_after_backward(self):
            if self.global_step == self.test_batch_nan:
                # simulate parameter that became nan
                torch.nn.init.constant_(self.c_d1.bias, math.nan)

    model = CurrentModel()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=(model.test_batch_nan + 1),
        terminate_on_nan=True,
    )

    with pytest.raises(ValueError, match=r'.*Detected nan and/or inf values in `c_d1.bias`.*'):
        trainer.fit(model)
        assert trainer.global_step == model.test_batch_nan

    # after aborting the training loop, model still has nan-valued params
    params = torch.cat([param.view(-1) for param in model.parameters()])
    assert not torch.isfinite(params).all()
Example 13: test_inf_nan_data
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def test_inf_nan_data(self):
    self.model.eval()
    self.model.score_threshold = -999999999
    for tensor in [self._inf_tensor, self._nan_tensor]:
        images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
        features = [
            tensor(1, 256, 128, 128),
            tensor(1, 256, 64, 64),
            tensor(1, 256, 32, 32),
            tensor(1, 256, 16, 16),
            tensor(1, 256, 8, 8),
        ]
        anchors = self.model.anchor_generator(features)
        _, pred_anchor_deltas = self.model.head(features)
        HWAs = [np.prod(x.shape[-3:]) // 4 for x in pred_anchor_deltas]

        pred_logits = [tensor(1, HWA, self.model.num_classes) for HWA in HWAs]
        pred_anchor_deltas = [tensor(1, HWA, 4) for HWA in HWAs]
        det = self.model.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
        # all predictions (if any) are infinite or nan
        if len(det[0]):
            self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0)
Example 14: __call__
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def __call__(self, engine: Engine) -> None:
    output = self._output_transform(engine.state.output)

    def raise_error(x: Union[numbers.Number, torch.Tensor]) -> None:
        if isinstance(x, numbers.Number):
            x = torch.tensor(x)

        if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
            raise RuntimeError("Infinite or NaN tensor found.")

    try:
        apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
    except RuntimeError:
        self.logger.warning(
            "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output)
        )
        engine.terminate()
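Examples 9 and 14 appear to be an older and a type-annotated version of PyTorch Ignite's TerminateOnNan handler. Attaching it to a training engine is a one-liner; in this sketch, train_step is assumed to be a user-defined update function:

from ignite.engine import Engine, Events
from ignite.handlers import TerminateOnNan

trainer = Engine(train_step)  # train_step(engine, batch): assumed defined elsewhere
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())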
Example 15: _check_gradients
# Required import: import torch [as alias]
# Or: from torch import isfinite [as alias]
def _check_gradients(harn):
    """
    Checks that the accumulated gradients are all finite.

    Raises:
        TrainingDiverged: if checks fail

    Example:
        harn = ...
        all_grads = harn._check_gradients()
        ub.map_vals(torch.norm, all_grads)
    """
    all_grads = ub.odict()
    for name, parameter in harn.model.named_parameters():
        if parameter.grad is not None:
            all_grads[name] = parameter.grad.data

    for key, value in all_grads.items():
        if torch.any(~torch.isfinite(value)):
            raise TrainingDiverged(
                'NON-FINITE GRAD {}.grad = {!r}'.format(key, value))
    return all_grads