This article collects typical usage examples of the Python method torch.any. If you are wondering how torch.any is used in practice, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the torch module that the method belongs to.
A total of 11 code examples of torch.any are shown below, sorted by popularity by default.
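Before the project examples, here is a minimal, self-contained sketch of what torch.any itself does (this snippet is not taken from any of the projects below):

import torch

mask = torch.tensor([[False, True], [False, False]])

# Reduce over all elements: True if any element is True.
print(torch.any(mask))            # tensor(True)

# Reduce along a dimension: one result per row.
print(torch.any(mask, dim=1))     # tensor([ True, False])

# Non-boolean input is treated as "any non-zero element".
weights = torch.zeros(3)
print(torch.any(weights > 0))     # tensor(False)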
Example 1: forward
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def forward(self,
            pred,
            target,
            weight=None,
            avg_factor=None,
            reduction_override=None,
            **kwargs):
    if weight is not None and not torch.any(weight > 0):
        return (pred * weight).sum()  # 0
    assert reduction_override in (None, 'none', 'mean', 'sum')
    reduction = (
        reduction_override if reduction_override else self.reduction)
    loss = self.loss_weight * bounded_iou_loss(
        pred,
        target,
        weight,
        beta=self.beta,
        eps=self.eps,
        reduction=reduction,
        avg_factor=avg_factor,
        **kwargs)
    return loss
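The early-return guard at the top of forward is a common pattern: when no element of weight is positive, (pred * weight).sum() yields a zero loss that still stays connected to the autograd graph. A minimal standalone sketch of that guard (the tensor shapes here are purely illustrative):

import torch

pred = torch.randn(4, 4, requires_grad=True)
weight = torch.zeros(4, 4)

if weight is not None and not torch.any(weight > 0):
    loss = (pred * weight).sum()  # zero-valued, but gradients still flow to pred

loss.backward()
print(loss.item())              # 0.0
print(pred.grad.abs().sum())    # tensor(0.) -- gradients exist but are all zero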
Example 2: test_center_region_assigner
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def test_center_region_assigner():
    self = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20],
                                [8, 8, 9, 9]])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 11, 11],  # match bboxes[0]
        [10, 10, 20, 20],  # match bboxes[1]
        [4.5, 4.5, 5.5, 5.5],  # match bboxes[0] but area is too small
        [0, 0, 10, 10],  # match bboxes[0] and has a smaller area than gt[0]
    ])
    gt_labels = torch.LongTensor([2, 3, 4, 5])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 3
    assert len(assign_result.labels) == 3
    expected_gt_inds = torch.LongTensor([4, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
    shadowed_labels = assign_result.get_extra_property('shadowed_labels')
    # [8, 8, 9, 9] is in the shadowed region of [0, 0, 11, 11] (label: 2)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 2]]))
    # [8, 8, 9, 9] is in the shadowed region of [0, 0, 10, 10] (label: 5)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 5]]))
    # [0, 0, 10, 10] is already assigned to [4.5, 4.5, 5.5, 5.5].
    # Therefore, [0, 0, 11, 11] (label: 2) is shadowed
    assert torch.any(shadowed_labels == torch.LongTensor([[0, 2]]))
Example 3: forward
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def forward(self,
            pred,
            target,
            weight=None,
            avg_factor=None,
            reduction_override=None,
            **kwargs):
    if weight is not None and not torch.any(weight > 0):
        return (pred * weight).sum()  # 0
    assert reduction_override in (None, 'none', 'mean', 'sum')
    reduction = (
        reduction_override if reduction_override else self.reduction)
    loss = self.loss_weight * iou_loss(
        pred,
        target,
        weight,
        eps=self.eps,
        reduction=reduction,
        avg_factor=avg_factor,
        **kwargs)
    return loss
Example 4: __call__
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def __call__(
        self,
        sample: Subject,
        num_patches: Optional[int] = None,
) -> Generator[Subject, None, None]:
    sample.check_consistent_shape()
    if np.any(self.patch_size > sample.spatial_shape):
        message = (
            f'Patch size {tuple(self.patch_size)} cannot be'
            f' larger than image size {tuple(sample.spatial_shape)}'
        )
        raise RuntimeError(message)
    probability_map = self.get_probability_map(sample)
    probability_map = self.process_probability_map(probability_map)
    cdf, sort_indices = self.get_cumulative_distribution_function(
        probability_map)
    patches_left = num_patches if num_patches is not None else True
    while patches_left:
        yield self.extract_patch(sample, probability_map, cdf, sort_indices)
        if num_patches is not None:
            patches_left -= 1
Example 5: get_probability_map
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def get_probability_map(self, sample: Subject) -> torch.Tensor:
    if self.probability_map_name in sample:
        data = sample[self.probability_map_name].data
    else:
        message = (
            f'Image "{self.probability_map_name}"'
            f' not found in subject sample: {sample}'
        )
        raise KeyError(message)
    if torch.any(data < 0):
        message = (
            'Negative values found'
            f' in probability map "{self.probability_map_name}"'
        )
        raise ValueError(message)
    return data
Example 6: test_terner_connect_sto_forward
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def test_terner_connect_sto_forward():
    x = torch.Tensor([1, 0, 0.45, -1, -0.9]).view(1, -1)
    results = list()
    for i in range(1000):
        temp_result = TernaryConnectStochastic.apply(x)
        # The output tensor must contain only -1, 0 and 1 values
        assert not torch.any(
            (temp_result != -1) & (temp_result != 0) & (temp_result != 1))
        results.append(temp_result)
    result = torch.cat(results, 0)
    result = torch.sum(result, 0) / 1000
    assert equals(
        result,
        torch.Tensor([1, 0, 0.45, -1, -0.9]).view(1, -1),
        5e-2)
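Why the final expectation check makes sense: stochastic ternarization typically rounds x to sign(x) with probability |x| and to 0 otherwise, so the average over many draws converges to x. A hedged, illustrative stand-in for TernaryConnectStochastic (not the project's actual implementation):

import torch

def ternarize_stochastic(x):
    # Illustrative only: round to sign(x) with probability |x| (x assumed in [-1, 1]),
    # otherwise round to 0, so that the expected output equals x.
    return torch.sign(x) * torch.bernoulli(x.abs())

x = torch.tensor([1.0, 0.0, 0.45, -1.0, -0.9])
mean = torch.stack([ternarize_stochastic(x) for _ in range(1000)]).mean(0)
print(mean)   # each entry is typically within ~5e-2 of the corresponding entry of x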
Example 7: _get_labels_to_tasks
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def _get_labels_to_tasks(
    self, label_names: Iterable[str], remap_labels: Dict[str, Optional[str]] = {}
) -> Dict[str, str]:
    """Map each label to its corresponding task outputs based on whether the task is available.

    If remap_labels is specified, it overrides specific label -> task mappings.
    If a label is mapped to `None`, that key is removed from the mapping.
    """
    labels_to_tasks = {}
    for label in label_names:
        # Override any existing label -> task mappings
        if label in remap_labels:
            task = remap_labels.get(label)
            # Note: task might be manually remapped to None to remove it from labels_to_tasks
            if task is not None:
                labels_to_tasks[label] = task
        # If available in task flows, the label should map to the task of the same name
        elif label in self.op_sequences:
            labels_to_tasks[label] = label
    return labels_to_tasks
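The override rules in the docstring above are easier to see with concrete inputs. Below is a standalone sketch of the same mapping logic, stripped of the class context (op_sequences stands in for self.op_sequences; all names and values are illustrative):

def labels_to_tasks(label_names, op_sequences, remap_labels={}):
    """Standalone version of the mapping rules in _get_labels_to_tasks above."""
    mapping = {}
    for label in label_names:
        if label in remap_labels:
            task = remap_labels[label]
            if task is not None:          # mapping a label to None drops it
                mapping[label] = task
        elif label in op_sequences:       # stands in for self.op_sequences
            mapping[label] = label
    return mapping

# "toxic" is remapped to None, so only "spam" survives and maps to itself.
print(labels_to_tasks(["spam", "toxic"],
                      op_sequences={"spam": object(), "toxic": object()},
                      remap_labels={"toxic": None}))   # {'spam': 'spam'}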
Example 8: __init__
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def __init__(self, mean: torch.Tensor, std: torch.Tensor, inplace: bool = False):
    tensor_mean = Normalize._transform_to_tensor(mean, "mean")
    tensor_std = Normalize._transform_to_tensor(std, "std")
    Normalize._check_shape(tensor_mean, "mean")
    Normalize._check_shape(tensor_std, "std")
    if torch.any(tensor_std == 0):
        raise ValueError(
            "One or more std values are zero which would lead to division by zero."
        )
    super().__init__()
    self.register_buffer("mean", tensor_mean)
    self.register_buffer("std", tensor_std)
    self.inplace: bool = inplace
Example 9: __next__
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def __next__(self):
    # Stop criterion. For CUDA, inplace masking the array is too slow, so the matrix is
    # left unchanged. On CPU, we continually modify the matrix by removing rows.
    if self.CUDA:
        if not _torch.any(self.kept_mask).item():
            raise StopIteration
    elif len(self.matrix) == 0:
        raise StopIteration

    cluster, medoid, points = self._findcluster()
    self.nclusters += 1

    for point in points:
        self.kept_mask[point] = 0

    # Remove all points that have been clustered away. This is slow in itself, but it speeds
    # up distance calculation by leaving fewer points. Worth it on CPU, not on GPU.
    if not self.CUDA:
        _vambtools.torch_inplace_maskarray(self.matrix, self.kept_mask)
        self.indices = self.indices[self.kept_mask]  # no need to inplace mask small array
        self.kept_mask.resize_(len(self.matrix))
        self.kept_mask[:] = 1

    return cluster
Example 10: _normalize
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def _normalize(matrix, inplace=False):
    """Preprocess the matrix to make distance calculations faster.
    The distance functions in this module assume the input has been normalized
    and will not work otherwise.
    """
    if isinstance(matrix, _np.ndarray):
        matrix = _torch.from_numpy(matrix)

    if not inplace:
        matrix = matrix.clone()

    # If any rows are entirely zero, the distance function will return 0.5 to all points,
    # including the row itself, which can break the code in this module
    zeromask = matrix.sum(dim=1) == 0
    matrix[zeromask] = 1 / matrix.shape[1]
    matrix /= (matrix.norm(dim=1).reshape(-1, 1) * (2 ** 0.5))
    return matrix
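Two properties follow directly from the code above: all-zero rows are replaced by a uniform row before scaling, and every row ends up with Euclidean norm 1/sqrt(2). A quick standalone check of both, using plain torch in place of the module's _torch/_np aliases:

import torch

matrix = torch.tensor([[1.0, 2.0, 2.0],
                       [0.0, 0.0, 0.0]])   # second row is all zeros

# Same steps as _normalize above.
zeromask = matrix.sum(dim=1) == 0
matrix[zeromask] = 1 / matrix.shape[1]
matrix /= matrix.norm(dim=1).reshape(-1, 1) * (2 ** 0.5)

# No zero rows remain, and every row now has norm 1/sqrt(2).
assert not torch.any(matrix.sum(dim=1) == 0)
assert torch.allclose(matrix.norm(dim=1), torch.full((2,), 2 ** -0.5))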
Example 11: forward
# Required import: import torch [as alias]
# Or: from torch import any [as alias]
def forward(self,
            pred,
            target,
            weight=None,
            avg_factor=None,
            reduction_override=None,
            **kwargs):
    if weight is not None and not torch.any(weight > 0):
        return (pred * weight).sum()  # 0
    if weight is not None:  # iou loss is single unit
        weight = weight[:, 0]
    assert reduction_override in (None, 'none', 'mean', 'sum')
    reduction = (
        reduction_override if reduction_override else self.reduction)
    loss = self.loss_weight * iou_loss(
        pred,
        target,
        weight,
        linear=self.linear,
        eps=self.eps,
        reduction=reduction,
        avg_factor=avg_factor,
        **kwargs)
    return loss