This article collects typical usage examples of the Python class allennlp.nn.RegularizerApplicator. If you are wondering what nn.RegularizerApplicator is for, or how and where to use it, the curated examples below should help. You can also read further about its containing module, allennlp.nn.
The following shows 15 code examples of nn.RegularizerApplicator, sorted by popularity by default.
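Before the examples, here is a minimal sketch of how a RegularizerApplicator is typically constructed and applied. The regex pattern and penalty weight are illustrative assumptions, not taken from any example below:

# Minimal sketch, assuming AllenNLP's regularizer API:
# RegularizerApplicator takes (regex, Regularizer) pairs and, when called on a
# module, sums the penalties over all parameters whose names match a pattern.
import torch
from allennlp.nn import RegularizerApplicator
from allennlp.nn.regularizers import L2Regularizer

# Illustrative rule: L2-penalize weight matrices, leave biases untouched.
applicator = RegularizerApplicator([
    ("weight", L2Regularizer(alpha=0.01)),
])

module = torch.nn.Linear(10, 2)  # stand-in for any torch.nn.Module
penalty = applicator(module)     # scalar tensor, added to the training loss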
Example 1: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
verbose_metrics: bool = False,
dropout: float = 0.2,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.dropout = torch.nn.Dropout(dropout)
self.num_classes = self.vocab.get_vocab_size("labels")
self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim(), self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
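Note the pattern in this constructor (repeated in every example below): regularizer is forwarded to the base Model, and initializer(self) runs last. The base Model stores the applicator and exposes its penalty through get_regularization_penalty(), which the trainer adds to the loss. A hedged sketch of that plumbing, using a hypothetical TinyModel rather than the TextClassifier above:

import torch
from allennlp.data import Vocabulary
from allennlp.models import Model
from allennlp.nn import RegularizerApplicator
from allennlp.nn.regularizers import L2Regularizer

class TinyModel(Model):
    # Hypothetical minimal Model, defined only to show the regularizer plumbing.
    def __init__(self, vocab: Vocabulary, regularizer: RegularizerApplicator = None) -> None:
        super().__init__(vocab, regularizer)
        self.linear = torch.nn.Linear(4, 2)

model = TinyModel(Vocabulary(),
                  RegularizerApplicator([("weight", L2Regularizer(alpha=0.01))]))
print(model.get_regularization_penalty())  # scalar tensor; empty/None when no regularizer is set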
Example 2: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
input_dim: int,
num_classes: int,
label_namespace: str = "labels",
feedforward: Optional[FeedForward] = None,
dropout: Optional[float] = None,
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self.label_namespace = label_namespace
self.input_dim = input_dim
self.num_classes = num_classes
self._verbose_metrics = verbose_metrics
if dropout:
self.dropout = torch.nn.Dropout(dropout)
else:
self.dropout = None
self._feedforward = feedforward
if self._feedforward is not None:
self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
else:
self.projection_layer = Linear(self.input_dim, self.num_classes)
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3),
"accuracy5": CategoricalAccuracy(top_k=5)
}
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
Example 3: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
text_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(TextClassifier, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.text_encoder = text_encoder
self.classifier_feedforward = classifier_feedforward
self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)
self.label_accuracy = CategoricalAccuracy()
self.label_f1_metrics = {}
self.verbose_metrics = verbose_metrics
for i in range(self.num_classes):
self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
self.loss = torch.nn.CrossEntropyLoss()
self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
initializer(self)
Example 4: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
include_start_end_transitions: bool = True,
dropout: Optional[float] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self.label_namespace = 'labels'
self.num_tags = self.vocab.get_vocab_size(self.label_namespace)
# encode text
self.text_field_embedder = text_field_embedder
self.encoder = encoder
self.dropout = torch.nn.Dropout(dropout) if dropout else None
# crf
output_dim = self.encoder.get_output_dim()
self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
self.crf = ConditionalRandomField(self.num_tags, constraints=None, include_start_end_transitions=include_start_end_transitions)
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
self.metrics['F1_' + label] = F1Measure(positive_label=index)
initializer(self)
Example 5: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
predictor_dropout: float = 0.0,
labels_namespace: str = "labels",
detect_namespace: str = "d_tags",
verbose_metrics: bool = False,
label_smoothing: float = 0.0,
confidence: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(Seq2Labels, self).__init__(vocab, regularizer)
self.label_namespaces = [labels_namespace,
detect_namespace]
self.text_field_embedder = text_field_embedder
self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
self.label_smoothing = label_smoothing
self.confidence = confidence
self.incorr_index = self.vocab.get_token_index("INCORRECT",
namespace=detect_namespace)
self._verbose_metrics = verbose_metrics
self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))
self.tag_labels_projection_layer = TimeDistributed(
Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))
self.tag_detect_projection_layer = TimeDistributed(
Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))
self.metrics = {"accuracy": CategoricalAccuracy()}
initializer(self)
Example 6: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
final_feedforward: FeedForward,
coverage_loss: CoverageLoss,
similarity_function: SimilarityFunction = DotProductSimilarity(),
dropout: float = 0.5,
contextualize_pair_comparators: bool = False,
pair_context_encoder: Seq2SeqEncoder = None,
pair_feedforward: FeedForward = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
# Pass these arguments through verbatim; otherwise FromParams doesn't work properly.
super().__init__(vocab=vocab,
text_field_embedder=text_field_embedder,
encoder=encoder,
similarity_function=similarity_function,
projection_feedforward=projection_feedforward,
inference_encoder=inference_encoder,
output_feedforward=output_feedforward,
output_logit=output_logit,
final_feedforward=final_feedforward,
contextualize_pair_comparators=contextualize_pair_comparators,
coverage_loss=coverage_loss,
pair_context_encoder=pair_context_encoder,
pair_feedforward=pair_feedforward,
dropout=dropout,
initializer=initializer,
regularizer=regularizer)
self._answer_loss = torch.nn.CrossEntropyLoss()
self._accuracy = CategoricalAccuracy()
Example 7: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
final_feedforward: FeedForward,
coverage_loss: CoverageLoss,
similarity_function: SimilarityFunction = DotProductSimilarity(),
dropout: float = 0.5,
contextualize_pair_comparators: bool = False,
pair_context_encoder: Seq2SeqEncoder = None,
pair_feedforward: FeedForward = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab=vocab,
text_field_embedder=text_field_embedder,
encoder=encoder,
similarity_function=similarity_function,
projection_feedforward=projection_feedforward,
inference_encoder=inference_encoder,
output_feedforward=output_feedforward,
output_logit=output_logit,
final_feedforward=final_feedforward,
coverage_loss=coverage_loss,
contextualize_pair_comparators=contextualize_pair_comparators,
pair_context_encoder=pair_context_encoder,
pair_feedforward=pair_feedforward,
dropout=dropout,
initializer=initializer,
regularizer=regularizer)
self._ignore_index = -1
self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
self._coverage_loss = coverage_loss
self._accuracy = CategoricalAccuracy()
self._entailment_f1 = F1Measure(self._label2idx["entailment"])
Example 8: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
phrase_layer: Seq2SeqEncoder,
projected_layer: Seq2SeqEncoder,
flow_layer: Seq2SeqEncoder,
contextual_passage: Seq2SeqEncoder,
contextual_question: Seq2SeqEncoder,
dropout: float = 0.2,
regularizer: Optional[RegularizerApplicator] = None,
initializer: InitializerApplicator = InitializerApplicator(),
) -> None:
super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._phrase_layer = phrase_layer
self._encoding_dim = self._phrase_layer.get_output_dim()
self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
self.fuse = FusionLayer(self._encoding_dim)
self.projected_lstm = projected_layer
self.flow = flow_layer
self.contextual_layer_p = contextual_passage
self.contextual_layer_q = contextual_question
self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
self.relu = torch.nn.ReLU()
self._max_span_length = 30
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
self._span_yesno_accuracy = CategoricalAccuracy()
self._official_f1 = Average()
self._variational_dropout = InputVariationalDropout(dropout)
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
Example 9: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self,
vocab: Vocabulary,
source_text_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
decoder: SeqDecoder,
tied_source_embedder_key: Optional[str] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(CustomComposedSeq2Seq, self).__init__(vocab, regularizer)
self._source_text_embedder = source_text_embedder
self._encoder = encoder
self._decoder = decoder
if self._encoder.get_output_dim() != self._decoder.get_output_dim():
raise ConfigurationError(f"Encoder output dimension {self._encoder.get_output_dim()} should be"
f" equal to decoder dimension {self._decoder.get_output_dim()}.")
if tied_source_embedder_key:
if not isinstance(self._source_text_embedder, BasicTextFieldEmbedder):
raise ConfigurationError("Unable to tie embeddings,"
"Source text embedder is not an instance of `BasicTextFieldEmbedder`.")
source_embedder = self._source_text_embedder._token_embedders[tied_source_embedder_key]
if not isinstance(source_embedder, Embedding):
raise ConfigurationError("Unable to tie embeddings,"
"Selected source embedder is not an instance of `Embedding`.")
if source_embedder.get_output_dim() != self._decoder.target_embedder.get_output_dim():
raise ConfigurationError(f"Output Dimensions mismatch between"
f"source embedder and target embedder.")
self._source_text_embedder._token_embedders[tied_source_embedder_key] = self._decoder.target_embedder
initializer(self)
Example 10: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
question_encoder: Seq2SeqEncoder,
passage_encoder: Seq2SeqEncoder,
pair_encoder: AttentionEncoder,
self_encoder: AttentionEncoder,
output_layer: QAOutputLayer,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
share_encoder: bool = False):
super().__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.question_encoder = question_encoder
self.passage_encoder = passage_encoder
self.pair_encoder = pair_encoder
self.self_encoder = self_encoder
self.output_layer = output_layer
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
self.share_encoder = share_encoder
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
Example 11: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
calculate_span_f1: bool = None,
label_encoding: Optional[str] = None,
label_namespace: str = "labels",
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(SimpleTagger, self).__init__(vocab, regularizer)
self.label_namespace = label_namespace
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size(label_namespace)
self.encoder = encoder
self._verbose_metrics = verbose_metrics
self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
self.num_classes))
check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
"text field embedding dim", "encoder input dim")
# We keep calculate_span_f1 as a constructor argument for API consistency with
# the CrfTagger, even though it is redundant in this class
# (label_encoding serves the same purpose).
if calculate_span_f1 and not label_encoding:
raise ConfigurationError("calculate_span_f1 is True, but "
"no label_encoding was specified.")
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
if calculate_span_f1 or label_encoding:
self._f1_metric = SpanBasedF1Measure(vocab,
tag_namespace=label_namespace,
label_encoding=label_encoding)
else:
self._f1_metric = None
initializer(self)
Example 12: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self,
vocab: Vocabulary,
task: str,
encoder: Seq2SeqEncoder,
label_smoothing: float = 0.0,
dropout: float = 0.0,
adaptive: bool = False,
features: List[str] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(TagDecoder, self).__init__(vocab, regularizer)
self.task = task
self.encoder = encoder
self.output_dim = encoder.get_output_dim()
self.label_smoothing = label_smoothing
self.num_classes = self.vocab.get_vocab_size(task)
self.adaptive = adaptive
self.features = features if features else []
self.metrics = {
"acc": CategoricalAccuracy(),
# "acc3": CategoricalAccuracy(top_k=3)
}
if self.adaptive:
# TODO
adaptive_cutoffs = [round(self.num_classes / 15), 3 * round(self.num_classes / 15)]
self.task_output = AdaptiveLogSoftmaxWithLoss(self.output_dim,
self.num_classes,
cutoffs=adaptive_cutoffs,
div_value=4.0)
else:
self.task_output = TimeDistributed(Linear(self.output_dim, self.num_classes))
self.feature_outputs = torch.nn.ModuleDict()
self.features_metrics = {}
for feature in self.features:
self.feature_outputs[feature] = TimeDistributed(Linear(self.output_dim,
vocab.get_vocab_size(feature)))
self.features_metrics[feature] = {
"acc": CategoricalAccuracy(),
}
initializer(self)
Example 13: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self,
vocab: Vocabulary,
tasks: List[str],
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
decoders: Dict[str, Model],
post_encoder_embedder: TextFieldEmbedder = None,
dropout: float = 0.0,
word_dropout: float = 0.0,
mix_embedding: int = None,
layer_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(UdifyModel, self).__init__(vocab, regularizer)
self.tasks = sorted(tasks)
self.vocab = vocab
self.bert_vocab = BertTokenizer.from_pretrained("config/archive/bert-base-multilingual-cased/vocab.txt").vocab
self.text_field_embedder = text_field_embedder
self.post_encoder_embedder = post_encoder_embedder
self.shared_encoder = encoder
self.word_dropout = word_dropout
self.dropout = torch.nn.Dropout(p=dropout)
self.decoders = torch.nn.ModuleDict(decoders)
if mix_embedding:
self.scalar_mix = torch.nn.ModuleDict({
task: ScalarMixWithDropout(mix_embedding,
do_layer_norm=False,
dropout=layer_dropout)
for task in self.decoders
})
else:
self.scalar_mix = None
self.metrics = {}
for task in self.tasks:
if task not in self.decoders:
raise ConfigurationError(f"Task {task} has no corresponding decoder. Make sure their names match.")
check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
"text field embedding dim", "encoder input dim")
initializer(self)
self._count_params()
Example 14: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
encoder_keys: List[str],
mask_key: str,
pair2vec_config_file: str,
pair2vec_model_file: str,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
similarity_function: SimilarityFunction,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
initializer: InitializerApplicator = InitializerApplicator(),
dropout: float = 0.5,
pair2vec_dropout: float = 0.0,
bidirectional_pair2vec: bool = True,
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._vocab = vocab
self.pair2vec = util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
self._encoder_keys = encoder_keys
self._mask_key = mask_key
self._text_field_embedder = text_field_embedder
self._projection_feedforward = projection_feedforward
self._encoder = encoder
from allennlp.modules.matrix_attention import DotProductMatrixAttention
self._matrix_attention = DotProductMatrixAttention()
self._inference_encoder = inference_encoder
self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)
self._bidirectional_pair2vec = bidirectional_pair2vec
if dropout:
self.dropout = torch.nn.Dropout(dropout)
self.rnn_input_dropout = VariationalDropout(dropout)
else:
self.dropout = None
self.rnn_input_dropout = None
self._output_feedforward = output_feedforward
self._output_logit = output_logit
self._num_labels = vocab.get_vocab_size(namespace="labels")
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
Example 15: __init__
# Required import: from allennlp import nn [as alias]
# Or: from allennlp.nn import RegularizerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
similarity_function: SimilarityFunction,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
final_feedforward: FeedForward,
coverage_loss: CoverageLoss = None,
contextualize_pair_comparators: bool = False,
pair_context_encoder: Seq2SeqEncoder = None,
pair_feedforward: FeedForward = None,
optimize_coverage_for: List = ["entailment", "neutral"],
dropout: float = 0.5,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._label2idx = self.vocab.get_token_to_index_vocabulary('labels')
self._text_field_embedder = text_field_embedder
self._entailment_comparator_layer_1 = EsimComparatorLayer1(encoder, dropout)
self._entailment_comparator_layer_2 = EsimComparatorLayer2(similarity_function)
self._td_entailment_comparator_layer_1 = TimeDistributed(self._entailment_comparator_layer_1)
self._td_entailment_comparator_layer_2 = TimeDistributed(self._entailment_comparator_layer_2)
self._entailment_comparator_layer_3plus_local = EsimComparatorLayer3Plus(projection_feedforward, inference_encoder,
output_feedforward, dropout)
self._td_entailment_comparator_layer_3plus_local = EachOutputTimeDistributed(self._entailment_comparator_layer_3plus_local)
self._entailment_comparator_layer_3plus_global = copy.deepcopy(self._entailment_comparator_layer_3plus_local)
self._contextualize_pair_comparators = contextualize_pair_comparators
if not self._contextualize_pair_comparators:
self._output_logit = output_logit
self._td_output_logit = TimeDistributed(self._output_logit)
self._final_feedforward = final_feedforward
self._td_final_feedforward = TimeDistributed(final_feedforward)
linear = torch.nn.Linear(2 * self._entailment_comparator_layer_3plus_local.get_output_dim(),
self._final_feedforward.get_input_dim())
self._local_global_projection = torch.nn.Sequential(linear, torch.nn.ReLU())
if self._contextualize_pair_comparators:
self._pair_context_encoder = pair_context_encoder
self._td_pair_feedforward = TimeDistributed(pair_feedforward)
self._coverage_loss = coverage_loss
# Do not apply initializer. If you do, make sure it doesn't reinitialize transferred parameters.
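The closing comment flags a real pitfall: a blanket initializer would clobber the transferred comparator weights. One way to guard against that, sketched under the assumption of a recent AllenNLP where InitializerApplicator accepts prevent_regexes and named initializers such as XavierUniformInitializer exist (the regexes here are illustrative):

from allennlp.nn import InitializerApplicator
from allennlp.nn.initializers import XavierUniformInitializer

# Illustrative: initialize only the fresh projection layer, and explicitly
# shield everything transferred into the entailment comparator stack.
initializer = InitializerApplicator(
    [("_local_global_projection.*weight", XavierUniformInitializer())],
    prevent_regexes=["_entailment_comparator.*"],
)
initializer(model)  # `model` is assumed to be an instance of the class above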