This article collects typical usage examples of the Python method torch.Tensor.eq. If you have been wondering what Tensor.eq does, how to call it, and what real-world usage looks like, the curated code examples below should help. You can also read further into the containing class, torch.Tensor.
Five code examples of Tensor.eq are shown below, ordered by popularity and taken from open-source projects.
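Before the examples, a minimal standalone sketch of what torch.Tensor.eq does: it compares two tensors (or a tensor and a scalar) element-wise, broadcasting like the == operator, and returns a same-shaped tensor of truth values. The values below are made up for illustration.

import torch

a = torch.tensor([[1, 2, 3],
                  [4, 5, 6]])
b = torch.tensor([[1, 0, 3],
                  [4, 5, 0]])

# Element-wise equality; recent PyTorch returns a bool tensor.
print(a.eq(b))          # tensor([[ True, False,  True], [ True,  True, False]])
# Comparison against a scalar broadcasts over the whole tensor.
print(a.eq(2))
# The metric code below casts the result to integers before doing arithmetic on it.
print(a.eq(b).long())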
Example 1: __call__
# Module to import: from torch import Tensor [as alias]
# Or: from torch.Tensor import eq [as alias]
def __call__(self,  # type: ignore
             predicted_indices: torch.Tensor,
             predicted_labels: torch.Tensor,
             gold_indices: torch.Tensor,
             gold_labels: torch.Tensor,
             mask: Optional[torch.Tensor] = None):
    """
    Parameters
    ----------
    predicted_indices : ``torch.Tensor``, required.
        A tensor of head index predictions of shape (batch_size, timesteps).
    predicted_labels : ``torch.Tensor``, required.
        A tensor of arc label predictions of shape (batch_size, timesteps).
    gold_indices : ``torch.Tensor``, required.
        A tensor of the same shape as ``predicted_indices``.
    gold_labels : ``torch.Tensor``, required.
        A tensor of the same shape as ``predicted_labels``.
    mask : ``torch.Tensor``, optional (default = None).
        A tensor of the same shape as ``predicted_indices``.
    """
    unwrapped = self.unwrap_to_tensors(predicted_indices, predicted_labels,
                                       gold_indices, gold_labels, mask)
    predicted_indices, predicted_labels, gold_indices, gold_labels, mask = unwrapped

    if mask is None:
        # Count every position when no mask is supplied.
        mask = torch.ones_like(predicted_indices)
    mask = mask.long()
    predicted_indices = predicted_indices.long()
    predicted_labels = predicted_labels.long()
    gold_indices = gold_indices.long()
    gold_labels = gold_labels.long()

    # Multiply by a mask denoting locations of
    # gold labels which we should ignore.
    for label in self._ignore_classes:
        label_mask = gold_labels.eq(label)
        mask = mask * (1 - label_mask).long()

    correct_indices = predicted_indices.eq(gold_indices).long() * mask
    unlabeled_exact_match = (correct_indices + (1 - mask)).prod(dim=-1)
    correct_labels = predicted_labels.eq(gold_labels).long() * mask
    correct_labels_and_indices = correct_indices * correct_labels
    labeled_exact_match = (correct_labels_and_indices + (1 - mask)).prod(dim=-1)

    self._unlabeled_correct += correct_indices.sum()
    self._exact_unlabeled_correct += unlabeled_exact_match.sum()
    self._labeled_correct += correct_labels_and_indices.sum()
    self._exact_labeled_correct += labeled_exact_match.sum()
    self._total_sentences += correct_indices.size(0)
    self._total_words += correct_indices.numel() - (1 - mask).sum()
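As a rough, self-contained sketch of the pattern in Example 1 (not part of the original class), the snippet below replays the eq-and-mask arithmetic on toy head and label tensors; the shapes, values, and the ignored label id 9 are invented for illustration.

import torch

# Hypothetical toy batch: 2 sentences, 3 tokens each.
predicted_indices = torch.tensor([[0, 2, 1], [0, 1, 1]])
gold_indices      = torch.tensor([[0, 2, 2], [0, 1, 1]])
predicted_labels  = torch.tensor([[3, 4, 5], [3, 3, 4]])
gold_labels       = torch.tensor([[3, 4, 5], [3, 3, 9]])
mask              = torch.tensor([[1, 1, 1], [1, 1, 0]])  # last token of sentence 2 is padding

# Zero out positions whose gold label should be ignored (label id 9 here).
for label in (9,):
    mask = mask * (1 - gold_labels.eq(label).long())

correct_indices = predicted_indices.eq(gold_indices).long() * mask
correct_labels  = predicted_labels.eq(gold_labels).long() * mask

# Adding (1 - mask) makes padded or ignored positions count as "correct",
# so prod(dim=-1) yields a per-sentence exact-match flag.
unlabeled_exact_match = (correct_indices + (1 - mask)).prod(dim=-1)
print(correct_indices.sum().item())    # 4 correctly attached heads
print(unlabeled_exact_match.tolist())  # [0, 1]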
Example 2: __call__
# Module to import: from torch import Tensor [as alias]
# Or: from torch.Tensor import eq [as alias]
def __call__(self,
             predictions: torch.Tensor,
             gold_labels: torch.Tensor,
             mask: Optional[torch.Tensor] = None):
    """
    Parameters
    ----------
    predictions : ``torch.Tensor``, required.
        A tensor of predictions of shape (batch_size, ...).
    gold_labels : ``torch.Tensor``, required.
        A tensor of the same shape as ``predictions``.
    mask : ``torch.Tensor``, optional (default = None).
        A tensor of the same shape as ``predictions``.
    """
    predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)

    if mask is not None:
        # We can multiply by the mask up front, because we're just checking equality below,
        # and this way everything that's masked will be equal.
        predictions = predictions * mask
        gold_labels = gold_labels * mask

    batch_size = predictions.size(0)
    predictions = predictions.view(batch_size, -1)
    gold_labels = gold_labels.view(batch_size, -1)

    # The .prod() here is functioning as a logical AND.
    correct = predictions.eq(gold_labels).prod(dim=1).float()
    count = torch.ones(gold_labels.size(0))
    self._correct_count += correct.sum()
    self._total_count += count.sum()
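To make the eq-then-prod trick in Example 2 concrete, here is a small sketch with invented values: flattening each instance and taking prod over the eq result acts as a logical AND, so an instance only counts as correct when every element matches.

import torch

predictions = torch.tensor([[1, 2, 3], [1, 0, 3]])
gold_labels = torch.tensor([[1, 2, 3], [1, 2, 3]])

# Per-element matches, then prod over each row as a logical AND.
correct = predictions.eq(gold_labels).long().prod(dim=1).float()
print(correct)                                   # tensor([1., 0.])
print((correct.sum() / correct.numel()).item())  # exact-match accuracy: 0.5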
Example 3: __call__
# Module to import: from torch import Tensor [as alias]
# Or: from torch.Tensor import eq [as alias]
def __call__(self,
             predictions: torch.Tensor,
             gold_labels: torch.Tensor,
             mask: Optional[torch.Tensor] = None):
    """
    Parameters
    ----------
    predictions : ``torch.Tensor``, required.
        A tensor of predictions of shape (batch_size, ..., num_classes).
    gold_labels : ``torch.Tensor``, required.
        A tensor of integer class labels of shape (batch_size, ...). It must be the same
        shape as the ``predictions`` tensor without the ``num_classes`` dimension.
    mask : ``torch.Tensor``, optional (default = None).
        A masking tensor the same size as ``gold_labels``.
    """
    predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)

    # Some sanity checks.
    num_classes = predictions.size(-1)
    if gold_labels.dim() != predictions.dim() - 1:
        raise ConfigurationError("gold_labels must have dimension == predictions.dim() - 1 but "
                                 "found tensor of shape: {}".format(predictions.size()))
    if (gold_labels >= num_classes).any():
        raise ConfigurationError("A gold label passed to Categorical Accuracy contains an id >= {}, "
                                 "the number of classes.".format(num_classes))

    predictions = predictions.view((-1, num_classes))
    gold_labels = gold_labels.view(-1).long()

    if not self._tie_break:
        # Top K indexes of the predictions (or fewer, if there aren't K of them).
        # Special case topk == 1, because it's common and .max() is much faster than .topk().
        if self._top_k == 1:
            top_k = predictions.max(-1)[1].unsqueeze(-1)
        else:
            top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]
        # This is of shape (batch_size, ..., top_k).
        correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
    else:
        # A prediction is correct if the gold label falls on any of the max scores;
        # the score is then distributed across the ties.
        max_predictions = predictions.max(-1)[0]
        max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))
        # max_predictions_mask is (rows X num_classes) and gold_labels is (batch_size,).
        # The ith entry in gold_labels points to an index (0 .. num_classes - 1) in the ith
        # row of max_predictions_mask; check whether that index was among the max-scored classes.
        correct = max_predictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()
        tie_counts = max_predictions_mask.sum(-1)
        correct /= tie_counts.float()
        correct.unsqueeze_(-1)

    if mask is not None:
        correct *= mask.view(-1, 1).float()
        self.total_count += mask.sum()
    else:
        self.total_count += gold_labels.numel()
    self.correct_count += correct.sum()
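The top-k branch in Example 3 can be isolated into the following sketch (toy logits, a hard-coded top_k of 2, no tie-breaking): eq against the unsqueezed gold labels marks a row correct when the gold class appears anywhere among its k best-scoring classes.

import torch

logits = torch.tensor([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
gold   = torch.tensor([2, 2])
top_k  = 2

# Indices of the k highest-scoring classes per row, shape (batch_size, k).
topk_indices = logits.topk(min(top_k, logits.shape[-1]), dim=-1)[1]

# eq broadcasts each gold label against its k candidates; any() over the
# last dimension says whether the gold class appeared in the top k.
correct = topk_indices.eq(gold.unsqueeze(-1)).any(dim=-1).float()
print(correct)  # tensor([1., 0.]): class 2 is in row 0's top 2, but not in row 1's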
Example 4: __call__
# Module to import: from torch import Tensor [as alias]
# Or: from torch.Tensor import eq [as alias]
def __call__(self,
             predictions: torch.Tensor,
             gold_labels: torch.Tensor,
             mask: Optional[torch.Tensor] = None):
    """
    Parameters
    ----------
    predictions : ``torch.Tensor``, required.
        A tensor of predictions of shape (batch_size, ..., num_classes).
    gold_labels : ``torch.Tensor``, required.
        A tensor of integer class labels of shape (batch_size, ...). It must be the same
        shape as the ``predictions`` tensor without the ``num_classes`` dimension.
    mask : ``torch.Tensor``, optional (default = None).
        A masking tensor the same size as ``gold_labels``.
    """
    # Get the data from the Variables.
    predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)

    num_classes = predictions.size(-1)
    if (gold_labels >= num_classes).any():
        raise ConfigurationError("A gold label passed to F1Measure contains an id >= {}, "
                                 "the number of classes.".format(num_classes))
    if mask is None:
        mask = torch.ones_like(gold_labels)
    mask = mask.float()
    gold_labels = gold_labels.float()

    positive_label_mask = gold_labels.eq(self._positive_label).float()
    negative_label_mask = 1.0 - positive_label_mask

    argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)

    # True Negatives: correct non-positive predictions.
    correct_null_predictions = (argmax_predictions !=
                                self._positive_label).float() * negative_label_mask
    self._true_negatives += (correct_null_predictions.float() * mask).sum()

    # True Positives: correct positively labeled predictions.
    correct_non_null_predictions = (argmax_predictions ==
                                    self._positive_label).float() * positive_label_mask
    self._true_positives += (correct_non_null_predictions * mask).sum()

    # False Negatives: incorrect negatively labeled predictions.
    incorrect_null_predictions = (argmax_predictions !=
                                  self._positive_label).float() * positive_label_mask
    self._false_negatives += (incorrect_null_predictions * mask).sum()

    # False Positives: incorrect positively labeled predictions.
    incorrect_non_null_predictions = (argmax_predictions ==
                                      self._positive_label).float() * negative_label_mask
    self._false_positives += (incorrect_non_null_predictions * mask).sum()
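As a hedged illustration of the bookkeeping in Example 4 (names and values below are invented), the sketch derives the positive and negative masks with eq and tallies true/false positives and false negatives for a single positive class.

import torch

positive_label = 1
predictions = torch.tensor([[0.8, 0.2],
                            [0.3, 0.7],
                            [0.4, 0.6]])          # (batch_size, num_classes)
gold_labels = torch.tensor([0, 1, 0])

argmax_predictions = predictions.max(-1)[1]                    # tensor([0, 1, 1])
positive_label_mask = gold_labels.eq(positive_label).float()   # tensor([0., 1., 0.])
negative_label_mask = 1.0 - positive_label_mask
predicted_positive  = argmax_predictions.eq(positive_label).float()

true_positives  = (predicted_positive * positive_label_mask).sum()          # 1
false_positives = (predicted_positive * negative_label_mask).sum()          # 1
false_negatives = ((1.0 - predicted_positive) * positive_label_mask).sum()  # 0

precision = true_positives / (true_positives + false_positives + 1e-13)
recall    = true_positives / (true_positives + false_negatives + 1e-13)
print(precision.item(), recall.item())  # approximately 0.5 and 1.0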
Example 5: __call__
# Module to import: from torch import Tensor [as alias]
# Or: from torch.Tensor import eq [as alias]
def __call__(self,
             predictions: torch.Tensor,
             gold_labels: torch.Tensor,
             mask: Optional[torch.Tensor] = None):
    """
    Parameters
    ----------
    predictions : ``torch.Tensor``, required.
        A tensor of predictions of shape (batch_size, ...).
    gold_labels : ``torch.Tensor``, required.
        A tensor of the same shape as ``predictions``.
    mask : ``torch.Tensor``, optional (default = None).
        A tensor of the same shape as ``predictions``.
    """
    predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
    batch_size = predictions.size(0)

    if mask is not None:
        # We can multiply by the mask up front, because we're just checking equality below,
        # and this way everything that's masked will be equal.
        predictions = predictions * mask
        gold_labels = gold_labels * mask

        # We want to skip predictions that are completely masked;
        # so we'll keep predictions that aren't.
        keep = mask.view(batch_size, -1).max(dim=1)[0].float()
    else:
        keep = torch.ones(batch_size).float()

    predictions = predictions.view(batch_size, -1)
    gold_labels = gold_labels.view(batch_size, -1)

    # At this point, predictions is (batch_size, rest_of_dims_combined),
    # so .eq -> .prod will be 1 if every element of the instance prediction is correct
    # and 0 if at least one element of the instance prediction is wrong.
    # Because of how we're handling masking, masked positions are automatically "correct".
    correct = predictions.eq(gold_labels).prod(dim=1).float()

    # Since masked positions are correct, we need to explicitly exclude instance predictions
    # where the entire prediction is masked (because they look "correct").
    self._correct_count += (correct * keep).sum()
    self._total_count += keep.sum()
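Finally, a small sketch (invented shapes and values) of the "keep" trick in Example 5: after masking, a row whose mask is all zeros looks perfectly correct, so it has to be excluded from both the numerator and the denominator.

import torch

predictions = torch.tensor([[1, 2], [3, 4]])
gold_labels = torch.tensor([[1, 2], [9, 9]])
mask        = torch.tensor([[1, 1], [0, 0]])   # second instance fully masked

masked_predictions = predictions * mask
masked_gold        = gold_labels * mask

# Fully masked rows become all zeros on both sides and therefore "match".
correct = masked_predictions.eq(masked_gold).long().prod(dim=1).float()  # tensor([1., 1.])
# keep flags rows with at least one unmasked position.
keep = mask.view(2, -1).max(dim=1)[0].float()                            # tensor([1., 0.])

accuracy = (correct * keep).sum() / keep.sum()
print(accuracy.item())  # 1.0, computed over the single kept instance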