This article collects typical usage examples of allennlp.training.metrics.BooleanAccuracy in Python. If you are unsure what metrics.BooleanAccuracy does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the containing module, allennlp.training.metrics.
Twelve code examples of metrics.BooleanAccuracy are shown below, sorted by popularity by default.
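Before turning to the examples, here is a minimal, self-contained sketch of what BooleanAccuracy measures (this snippet is illustrative and not taken from any of the examples below): an instance counts as correct only when its entire prediction tensor matches the gold tensor, which is why reading-comprehension models often use it for exact span matches.

import torch
from allennlp.training.metrics import BooleanAccuracy

accuracy = BooleanAccuracy()
predictions = torch.tensor([[1, 3], [5, 6]])
gold = torch.tensor([[1, 3], [5, 7]])  # the second row differs in one position
accuracy(predictions, gold)
print(accuracy.get_metric())  # 0.5 -- only the first instance matches exactly
accuracy.reset()              # clears the accumulated counts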
Example 1: test_accuracy_computation
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_accuracy_computation(self, device: str):
    accuracy = BooleanAccuracy()
    predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
    targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 2 / 4

    mask = torch.ones(4, 2, device=device).bool()
    mask[1, 1] = 0
    accuracy(predictions, targets, mask)
    assert accuracy.get_metric() == 5 / 8

    targets[1, 1] = 3
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 8 / 12

    accuracy.reset()
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 3 / 4
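The running values 2/4, 5/8, and 8/12 in the test above arise because the metric accumulates a correct count and a total count across calls until reset() is invoked. The simplified re-implementation below is only an approximation meant to illustrate that bookkeeping; it is not the AllenNLP source, and unlike the real class it does not skip completely masked instances (see Example 3 below).

import torch

class SimpleBooleanAccuracy:
    # Approximate stand-in for BooleanAccuracy, for illustration only.
    def __init__(self):
        self.correct_count = 0.0
        self.total_count = 0.0

    def __call__(self, predictions, gold_labels, mask=None):
        batch_size = predictions.size(0)
        if mask is not None:
            # Zero out masked positions so they always agree.
            predictions = predictions * mask
            gold_labels = gold_labels * mask
        # An instance is correct only if every element matches its gold value.
        correct = predictions.reshape(batch_size, -1).eq(gold_labels.reshape(batch_size, -1)).all(dim=1)
        self.correct_count += correct.sum().item()
        self.total_count += batch_size

    def get_metric(self, reset=False):
        value = self.correct_count / self.total_count if self.total_count > 0 else 0.0
        if reset:
            self.correct_count = 0.0
            self.total_count = 0.0
        return value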
Example 2: test_accuracy_computation
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_accuracy_computation(self):
    accuracy = BooleanAccuracy()
    predictions = torch.Tensor([[0, 1],
                                [2, 3],
                                [4, 5],
                                [6, 7]])
    targets = torch.Tensor([[0, 1],
                            [2, 2],
                            [4, 5],
                            [7, 7]])
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 2. / 4

    mask = torch.ones(4, 2)
    mask[1, 1] = 0
    accuracy(predictions, targets, mask)
    assert accuracy.get_metric() == 5. / 8

    targets[1, 1] = 3
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 8. / 12

    accuracy.reset()
    accuracy(predictions, targets)
    assert accuracy.get_metric() == 3. / 4
Example 3: test_skips_completely_masked_instances
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_skips_completely_masked_instances(self, device: str):
    accuracy = BooleanAccuracy()
    predictions = torch.tensor([[0, 1], [2, 3], [4, 5], [6, 7]], device=device)
    targets = torch.tensor([[0, 1], [2, 2], [4, 5], [7, 7]], device=device)
    mask = torch.tensor(
        [[False, False], [True, False], [True, True], [True, True]], device=device
    )
    accuracy(predictions, targets, mask)

    # The first instance is completely masked and is skipped; the second is correct
    # under its mask, the third is correct, and the fourth is wrong.
    assert accuracy.get_metric() == 2 / 3
Example 4: test_incorrect_gold_labels_shape_catches_exceptions
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_incorrect_gold_labels_shape_catches_exceptions(self, device: str):
    accuracy = BooleanAccuracy()
    predictions = torch.rand([5, 7], device=device)
    incorrect_shape_labels = torch.rand([5, 8], device=device)
    with pytest.raises(ValueError):
        accuracy(predictions, incorrect_shape_labels)
Example 5: test_incorrect_mask_shape_catches_exceptions
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_incorrect_mask_shape_catches_exceptions(self, device: str):
    accuracy = BooleanAccuracy()
    predictions = torch.rand([5, 7], device=device)
    labels = torch.rand([5, 7], device=device)
    incorrect_shape_mask = torch.randint(0, 2, [5, 8], device=device).bool()
    with pytest.raises(ValueError):
        accuracy(predictions, labels, incorrect_shape_mask)
Example 6: test_does_not_divide_by_zero_with_no_count
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def test_does_not_divide_by_zero_with_no_count(self, device: str):
    accuracy = BooleanAccuracy()
    assert accuracy.get_metric() == pytest.approx(0.0)
Example 7: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             flow_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator()):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.flow = flow_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
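In Example 7 (and the similar model constructors below), _span_accuracy is a BooleanAccuracy that is updated later, in the model's forward pass, by comparing the predicted answer span against the gold span. The helper below is a hypothetical sketch of that pattern (the function and argument names are illustrative, not taken from these models): both the start and the end index must match for an instance to count as correct.

import torch
from allennlp.training.metrics import BooleanAccuracy

def update_span_accuracy(span_accuracy: BooleanAccuracy,
                         predicted_start: torch.Tensor,  # shape (batch,)
                         predicted_end: torch.Tensor,    # shape (batch,)
                         gold_start: torch.Tensor,       # shape (batch,)
                         gold_end: torch.Tensor) -> None:
    # Stack the indices into (batch, 2) tensors; BooleanAccuracy then counts an
    # instance as correct only when the whole [start, end] pair matches.
    predicted_span = torch.stack([predicted_start, predicted_end], dim=-1)
    gold_span = torch.stack([gold_start, gold_end], dim=-1)
    span_accuracy(predicted_span, gold_span)

# Usage example:
metric = BooleanAccuracy()
update_span_accuracy(metric,
                     torch.tensor([3, 0]), torch.tensor([5, 2]),
                     torch.tensor([3, 1]), torch.tensor([5, 2]))
print(metric.get_metric())  # 0.5 -- only the first span matches on both ends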
Example 8: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             question_encoder: Seq2SeqEncoder,
             passage_encoder: Seq2SeqEncoder,
             pair_encoder: AttentionEncoder,
             self_encoder: AttentionEncoder,
             output_layer: QAOutputLayer,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             share_encoder: bool = False):
    super().__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.question_encoder = question_encoder
    self.passage_encoder = passage_encoder
    self.pair_encoder = pair_encoder
    self.self_encoder = self_encoder
    self.output_layer = output_layer

    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()

    self.share_encoder = share_encoder
    self.loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example 9: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self, vocab,
             text_field_embedder,
             num_highway_layers,
             phrase_layer,
             similarity_function,
             modeling_layer,
             span_end_encoder,
             dropout=0.2,
             mask_lstms=True,
             initializer=InitializerApplicator(),
             regularizer=None):
    super(BidirectionalAttentionFlow, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._highway_layer = TimeDistributed(Highway(text_field_embedder.get_output_dim(),
                                                  num_highway_layers))
    self._phrase_layer = phrase_layer
    self._matrix_attention = LegacyMatrixAttention(similarity_function)
    self._modeling_layer = modeling_layer
    self._span_end_encoder = span_end_encoder

    encoding_dim = phrase_layer.get_output_dim()
    modeling_dim = modeling_layer.get_output_dim()
    span_start_input_dim = encoding_dim * 4 + modeling_dim
    self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))

    span_end_encoding_dim = span_end_encoder.get_output_dim()
    span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
    self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))

    # Bidaf has lots of layer dimensions which need to match up - these aren't necessarily
    # obvious from the configuration files, so we check here.
    check_dimensions_match(modeling_layer.get_input_dim(), 4 * encoding_dim,
                           u"modeling layer input dim", u"4 * encoding dim")
    check_dimensions_match(text_field_embedder.get_output_dim(), phrase_layer.get_input_dim(),
                           u"text field embedder output dim", u"phrase layer input dim")
    check_dimensions_match(span_end_encoder.get_input_dim(), 4 * encoding_dim + 3 * modeling_dim,
                           u"span end encoder input dim", u"4 * encoding dim + 3 * modeling dim")

    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x
    self._mask_lstms = mask_lstms

    initializer(self)
Example 10: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             residual_encoder: Seq2SeqEncoder,
             span_start_encoder: Seq2SeqEncoder,
             span_end_encoder: Seq2SeqEncoder,
             initializer: InitializerApplicator,
             dropout: float = 0.2,
             pair2vec_dropout: float = 0.15,
             max_span_length: int = 30,
             pair2vec_model_file: str = None,
             pair2vec_config_file: str = None) -> None:
    super().__init__(vocab)
    self._max_span_length = max_span_length
    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = phrase_layer.get_output_dim()

    self.pair2vec = pair2vec_util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
    self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)

    self._matrix_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    # atten_dim = self._encoding_dim * 4 + 600 if ablation_type == 'attn_over_rels' else self._encoding_dim * 4
    atten_dim = self._encoding_dim * 4 + 600
    self._merge_atten = TimeDistributed(torch.nn.Linear(atten_dim, self._encoding_dim))

    self._residual_encoder = residual_encoder
    self._self_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    self._merge_self_attention = TimeDistributed(torch.nn.Linear(self._encoding_dim * 3,
                                                                 self._encoding_dim))

    self._span_start_encoder = span_start_encoder
    self._span_end_encoder = span_end_encoder
    self._span_start_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
    self._span_end_predictor = TimeDistributed(torch.nn.Linear(self._encoding_dim, 1))
    self._squad_metrics = SquadEmAndF1()
    initializer(self)

    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._official_em = Average()
    self._official_f1 = Average()
    self._span_accuracy = BooleanAccuracy()
    self._variational_dropout = InputVariationalDropout(dropout)
Example 11: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self, vocab: Vocabulary,
             elmo_embedder: TextFieldEmbedder,
             tokens_embedder: TextFieldEmbedder,
             features_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator()):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self.elmo_embedder = elmo_embedder
    self.tokens_embedder = tokens_embedder
    self.features_embedder = features_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse_p = FusionLayer(self._encoding_dim)
    self.fuse_q = FusionLayer(self._encoding_dim)
    self.fuse_s = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    # self._self_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    self._self_attention = BilinearMatrixAttention(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = FeedForward(self._encoding_dim, self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example 12: __init__
# Required import: from allennlp.training import metrics [as alias]
# Or: from allennlp.training.metrics import BooleanAccuracy [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator()):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse_p = FusionLayer(self._encoding_dim)
    self.fuse_q = FusionLayer(self._encoding_dim)
    self.fuse_s = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    # self.bilinear_self_align = BilinearSelfAlign(self._encoding_dim)
    # self._self_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    self._self_attention = BilinearMatrixAttention(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)